In [1]:
import glob
import math
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import random
import sklearn.metrics as metrics

from tensorflow.keras import optimizers
from tensorflow.keras import backend
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import add, concatenate, Conv2D, Dense, Dropout, Flatten, Input, Lambda, Reshape
from tensorflow.keras.layers import Activation, AveragePooling2D, BatchNormalization, MaxPooling2D, ZeroPadding2D
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import to_categorical


%matplotlib inline
In [2]:
                            # Set up 'ggplot' style
plt.style.use('ggplot')     # if want to use the default style, set 'classic'
plt.rcParams['ytick.right']     = True
plt.rcParams['ytick.labelright']= True
plt.rcParams['ytick.left']      = False
plt.rcParams['ytick.labelleft'] = False
plt.rcParams['font.family']     = 'Arial'
In [3]:
# where am i?
%pwd
Out[3]:
'C:\\Users\\david\\Documents\\ImageNet'
In [4]:
%ls
 Volume in drive C is Acer
 Volume Serial Number is F2E5-64E8

 Directory of C:\Users\david\Documents\ImageNet

09/22/2019  03:14 PM    <DIR>          .
09/22/2019  03:14 PM    <DIR>          ..
09/09/2019  01:02 AM                43 .gitattributes
08/22/2019  11:06 PM                26 .gitignore
09/22/2019  02:26 PM    <DIR>          .ipynb_checkpoints
09/20/2019  11:24 PM         2,012,453 AlexNet_Model_Train_Test.ipynb
09/20/2019  11:23 PM         2,248,246 Create_Train_Test_Set.ipynb
09/14/2019  03:53 PM    <DIR>          data
08/22/2019  11:09 PM           455,126 Download-ImageNet.html
09/09/2019  12:35 AM           288,923 Download-ImageNet.ipynb
09/03/2019  09:40 PM           367,769 Download-Pexels.html
09/09/2019  12:35 AM            94,549 Download-Pexels.ipynb
09/22/2019  03:12 PM                 0 FlowerPower.csv
09/21/2019  06:45 AM            24,730 FlowerPower_best9307.csv
09/22/2019  03:39 AM        62,618,096 FlowerPower_InceptionResNetV2best9337.hdf5
09/22/2019  10:18 AM        62,618,096 FlowerPower_InceptionResNetV2best9467.hdf5
09/21/2019  12:22 PM        98,136,496 FlowerPower_InceptionV4best9305.hdf5
09/21/2019  02:47 AM        98,136,496 FlowerPower_InceptionV4best9307.hdf5
09/21/2019  06:03 PM        98,136,496 FlowerPower_InceptionV4best9418.hdf5
08/17/2019  11:53 AM           124,162 ImageNet-Flowers.txt
08/17/2019  03:54 PM            75,692 ImageNet-Fungus.txt
08/17/2019  03:57 PM            81,424 ImageNet-Rocks.txt
09/21/2019  01:06 PM            66,035 Inception-ResNet-v1 & v2.ipynb
09/15/2019  03:16 PM            58,343 Inception-v4.ipynb
09/14/2019  11:39 PM            26,103 model.pdf
09/21/2019  09:58 AM    <DIR>          npz
09/03/2019  09:40 PM           128,688 Pexels-Flowers.txt
09/03/2019  09:40 PM            28,575 Pexels-Umbrellas.txt
09/14/2019  04:01 PM    <DIR>          readings
08/22/2019  11:02 PM                44 README.md
09/20/2019  11:19 PM           303,323 Reshape_Resize_Images.ipynb
09/21/2019  01:06 PM             2,280 Squeeze & Excitation.ipynb
09/09/2019  12:48 AM         8,546,104 train_Neural_Network (Conv2D, 96-0.8).html
09/22/2019  06:50 AM         2,346,911 train_Neural_Network (InceptionResNetV2, 96-0.8, Added data + Rediced Layers, try17 = 93.37.html
09/22/2019  02:01 PM         2,354,430 train_Neural_Network (InceptionResNetV2, 96-0.8, Added data + Rediced Layers, try18 = 94.67).html
09/15/2019  10:09 PM         2,427,075 train_Neural_Network (InceptionResNetV2, 96-0.8, Added data, try13).html
09/15/2019  02:35 AM        12,032,935 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try10).html
09/15/2019  11:36 AM         2,387,331 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try11).html
09/15/2019  05:42 PM         2,291,568 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try12).html
09/16/2019  06:31 AM         5,790,782 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try14).html
09/21/2019  09:52 AM        14,198,219 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try15 = 93.07).html
09/21/2019  06:41 PM         5,458,724 train_Neural_Network (InceptionV4, 96-0.8, Dropout + BatchNorm + Added data, try16 = 94.18).html
09/14/2019  08:36 PM         7,071,416 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp + Added data, try9).html
09/11/2019  01:01 AM         4,494,650 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try6).html
09/11/2019  10:59 PM         6,116,768 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try7).html
09/12/2019  02:35 AM         5,851,809 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer + RMSProp, try8).html
09/09/2019  03:08 AM         3,900,219 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try3).html
09/09/2019  11:09 PM         6,528,529 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try4).html
09/10/2019  08:44 PM         6,636,754 train_Neural_Network (ResNetV1, 96-0.8, Dropout + added layer, try5).html
09/09/2019  01:32 AM         6,583,279 train_Neural_Network (ResNetV1, 96-0.8, Dropout, try1).html
09/09/2019  02:40 AM         6,300,696 train_Neural_Network (ResNetV1, 96-0.8, Dropout, try2).html
09/09/2019  01:23 AM         6,446,135 train_Neural_Network (ResNetV1, 96-0.8, no Dropout, try1).html
09/22/2019  03:14 PM         2,065,808 train_Neural_Network.ipynb
09/20/2019  11:24 PM         1,838,211 VGG_Model_Setup.ipynb
09/20/2019  11:24 PM            17,772 VGG_Model_Train_Test.ipynb
09/20/2019  11:24 PM         4,184,405 VGG_Model_Train_Test_baseline.ipynb
09/20/2019  11:24 PM         2,184,240 VGG_Model_Train_Test_Mod1.ipynb
09/20/2019  11:24 PM         2,112,869 VGG_Model_Train_Test_Mod2.ipynb
09/20/2019  11:24 PM         1,952,795 VGG_Model_Train_Test_Mod3.ipynb
              53 File(s)    558,152,648 bytes
               6 Dir(s)  52,533,428,224 bytes free
In [5]:
flowers = glob.glob('./data/flr_*.jpg')
fungus = glob.glob('./data/fgs_*.jpg')
rocks = glob.glob('./data/rck_*.jpg')

pixel_flowers = glob.glob('./data/pxl_flower_*.jpeg')
pixel_umbrella = glob.glob('./data/pxl_umbrella_*.jpeg')
print("There are %s, %s flower, %s fungus, %s rock and %s umbrella pictures" %(len(flowers), len(pixel_flowers), len(fungus), len(rocks), len(pixel_umbrella)))
There are 1269, 1792 flower, 856 fungus, 1007 rock and 420 umbrella pictures
In [6]:
# Randomly show 10 examples of the images
from IPython.display import Image
    
dataset = flowers #flowers #fungus #rocks

for i in range(0, 5):
    index = random.randint(0, len(dataset)-1)   
    print("Showing:", dataset[index])
    
    img = mpimg.imread(dataset[index])
    imgplot = plt.imshow(img)
    plt.show()

#Image(dataset[index])
Showing: ./data\flr_01653.jpg
Showing: ./data\flr_00690.jpg
Showing: ./data\flr_00498.jpg
Showing: ./data\flr_00155.jpg
Showing: ./data\flr_01023.jpg

Extract the training and testing datasets

In [7]:
# Load the data
trDatOrg       = np.load('npz/flrnonflr-train-imgs96-0.8+.npz')['arr_0']
trLblOrg       = np.load('npz/flrnonflr-train-labels96-0.8+.npz')['arr_0']
tsDatOrg       = np.load('npz/flrnonflr-test-imgs96-0.8+.npz')['arr_0']
tsLblOrg       = np.load('npz/flrnonflr-test-labels96-0.8+.npz')['arr_0']
In [8]:
print("For the training and test datasets:")
print("The shapes are %s, %s, %s, %s" \
      %(trDatOrg.shape, trLblOrg.shape, tsDatOrg.shape, tsLblOrg.shape))
For the training and test datasets:
The shapes are (14784, 96, 96, 3), (14784,), (3696, 96, 96, 3), (3696,)
In [9]:
# Randomly show 10 examples of the images

data = tsDatOrg
label = tsLblOrg

for i in range(20):
    index = random.randint(0, len(data)-1)
    print("Showing %s index image, It is %s" %(index, label[index]))
    imgplot = plt.imshow(data[index])
    plt.show()
Showing 1909 index image, It is 1.0
Showing 973 index image, It is 1.0
Showing 3068 index image, It is 0.0
Showing 3082 index image, It is 0.0
Showing 2769 index image, It is 0.0
Showing 3251 index image, It is 0.0
Showing 2394 index image, It is 0.0
Showing 1816 index image, It is 1.0
Showing 3121 index image, It is 0.0
Showing 1859 index image, It is 1.0
Showing 2081 index image, It is 1.0
Showing 2528 index image, It is 0.0
Showing 187 index image, It is 1.0
Showing 1678 index image, It is 1.0
Showing 3407 index image, It is 0.0
Showing 1118 index image, It is 1.0
Showing 1273 index image, It is 1.0
Showing 473 index image, It is 1.0
Showing 3113 index image, It is 0.0
Showing 180 index image, It is 1.0
In [10]:
# Convert the data into 'float32'
# Rescale the values from 0~255 to 0~1
trDat       = trDatOrg.astype('float32')/255
tsDat       = tsDatOrg.astype('float32')/255

# Retrieve the row size of each image
# Retrieve the column size of each image
imgrows     = trDat.shape[1]
imgclms     = trDat.shape[2]
channel     = 3

# # reshape the data to be [samples][width][height][channel]
# # This is required by Keras framework
# trDat       = trDat.reshape(trDat.shape[0], imgrows, imgclms, channel)
# tsDat       = tsDat.reshape(tsDat.shape[0], imgrows, imgclms, channel)

# Perform one hot encoding on the labels
# Retrieve the number of classes in this problem
trLbl       = to_categorical(trLblOrg)
tsLbl       = to_categorical(tsLblOrg)
num_classes = tsLbl.shape[1]
In [11]:
# fix random seed for reproducibility
seed = 29
np.random.seed(seed)


modelname = 'FlowerPower'

#optmz = optimizers.Adam(lr=0.001)
optmz = optimizers.RMSprop(lr=0.001)
In [12]:
# Baseline Model -> func: createBaselineModel()

def createBaselineModel():
    inputs = Input(shape=(imgrows, imgclms, channel))
    x = Conv2D(30, (4, 4), activation='relu')(inputs)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(50, (4, 4), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.3)(x)
    x = Flatten()(x)
    x = Dense(32, activation='relu')(x)
    x = Dense(num_classes, activation='softmax')(x)
    
    model = Model(inputs=[inputs],outputs=x)
    
    model.compile(loss='categorical_crossentropy', 
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
In [13]:
# ResNetV1 -> func: createResNetV1()
def resLyr(inputs,
           numFilters=16,
           kernelSz=3,
           strides=1,
           activation='relu',
           batchNorm=True,
           convFirst=True,
           lyrName=None):
    convLyr = Conv2D(numFilters, kernel_size=kernelSz, strides=strides, 
                     padding='same', kernel_initializer='he_normal', 
                     kernel_regularizer=l2(1e-4), 
                     name=lyrName+'_conv' if lyrName else None)
    x = inputs
    if convFirst:
        x = convLyr(x)
        if batchNorm:
            x = BatchNormalization(name=lyrName+'_bn' if lyrName else None)(x)
        if activation is not None:
            x = Activation(activation,name=lyrName+'_'+activation if lyrName else None)(x)
    else:
        if batchNorm:
            x = BatchNormalization(name=lyrName+'_bn' if lyrName else None)(x)
        if activation is not None:
            x = Activation(activation, name=lyrName+'_'+activation if lyrName else None)(x)
        x = convLyr(x)
    return x


def resBlkV1(inputs,
             numFilters=16,
             numBlocks=3,
             downsampleOnFirst=True,
             names=None):
    x = inputs
    for run in range(0,numBlocks):
        strides = 1
        blkStr = str(run+1)
        if downsampleOnFirst and run == 0:
            strides = 2
        y = resLyr(inputs=x, numFilters=numFilters, strides=strides,
                   lyrName=names+'_Blk'+blkStr+'_Res1' if names else None)
        y = resLyr(inputs=y, numFilters=numFilters, activation=None,
                   lyrName=names+'_Blk'+blkStr+'_Res2' if names else None)
        if downsampleOnFirst and run == 0:
            x = resLyr(inputs=x, numFilters=numFilters, kernelSz=1,
                       strides=strides, activation=None, batchNorm=False,
                       lyrName=names+'_Blk'+blkStr+'_lin' if names else None)
        x = add([x,y], name=names+'_Blk'+blkStr+'_add' if names else None)
        x = Activation('relu', name=names+'_Blk'+blkStr+'_relu' if names else None)(x)
    return x

def createResNetV1(inputShape=(imgrows, imgclms, channel),
                   numClasses=2):
    inputs = Input(shape=inputShape)
    v = resLyr(inputs, lyrName='Inpt')
    v = resBlkV1(inputs=v, numFilters=16, numBlocks=3,
                 downsampleOnFirst=False, names='Stg1')
    v = Dropout(0.30)(v)
    v = resBlkV1(inputs=v, numFilters=32, numBlocks=3,
                 downsampleOnFirst=True, names='Stg2')
    v = Dropout(0.40)(v)
    v = resBlkV1(inputs=v, numFilters=64, numBlocks=3,
                 downsampleOnFirst=True, names='Stg3')
    v = Dropout(0.50)(v)
    v = resBlkV1(inputs=v, numFilters=128, numBlocks=3,
                 downsampleOnFirst=True, names='Stg4')
    v = Dropout(0.50)(v)
    v = resBlkV1(inputs=v, numFilters=128, numBlocks=3,
                 downsampleOnFirst=False, names='Stg5')
    v = Dropout(0.50)(v)
    v = resBlkV1(inputs=v, numFilters=256, numBlocks=3,
                 downsampleOnFirst=True, names='Stg6')
    v = Dropout(0.50)(v)
    v = AveragePooling2D(pool_size=6, name='AvgPool')(v)
    v = Flatten()(v) 
    outputs = Dense(numClasses, activation='softmax', 
                    kernel_initializer='he_normal')(v)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer=optmz, 
                  metrics=['accuracy'])
    return model
In [14]:
# Mostly Original # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)
    
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=192, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=224, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=256, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=448, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=512, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):

        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)
    
    x_R_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=320, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=320, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)
    
    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = inception_a_block(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = inception_b_block(x)
    x = reduction_b_block(x)
    
    x = inception_c_block(x)
    x = inception_c_block(x)
    x = inception_c_block(x)
    
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1536)(x) # Changed
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [15]:
# Modified2 # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)
    
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=192, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=224, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=224, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=256, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=384, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=448, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=512, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=256, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):

        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)
    
    x_R_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=256, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=320, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=320, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)
    
    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = reduction_b_block(x)
    
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1536)(x) # Changed
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [16]:
# Modified #(halfed) # Inception-v4 -> func: create_inception_v4()

def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=16, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=16, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=48, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    x_R2_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=32, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=32, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_a_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=48, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=48, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_ER1_2)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_2, x_ER1_3])
    return x

def inception_b_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_2 = Conv2D(filters=112, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_1)
    x_MR1_3 = Conv2D(filters=128, kernel_size=(1, 7), strides=1, padding='same')(x_MR1_2)
    
    x_ER1_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=96, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=112, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_4 = Conv2D(filters=112, kernel_size=(1, 7), strides=1, padding='same')(x_ER1_3)
    x_ER1_5 = Conv2D(filters=128, kernel_size=(7, 1), strides=1, padding='same')(x_ER1_4)

    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_3, x_ER1_5])
    return x

def inception_c_block(inputs,
         names=None):
    x = inputs
    
    x_EL1_1 = AveragePooling2D(pool_size=(1, 1), padding='same')(x)
    x_EL1_2 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x_EL1_1)
    
    x_ML1_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_MR1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR1_1_L2 = Conv2D(filters=128, kernel_size=(1, 3), strides=1, padding='same')(x_MR1_1)
    x_MR1_1_R2 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_MR1_1)
    
    x_ER1_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER1_2 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_1)
    x_ER1_3 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_2)
    x_ER1_3_L1 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_ER1_3)
    x_ER1_3_R1 = Conv2D(filters=128, kernel_size=(1, 3), strides=1, padding='same')(x_ER1_3)
    
    x = concatenate([x_EL1_2, x_ML1_1, x_MR1_1_L2, x_MR1_1_R2, x_ER1_3_L1, x_ER1_3_R1])
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 96, 112, 128, 192
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 96, 96, 128, 192
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 128, 128, 192, 192
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def reduction_b_block(inputs,
                      names=None):

        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x_M_1)
    
    x_R_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=128, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=160, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    x_R_4 = Conv2D(filters=160, kernel_size=(3, 3), strides=2, padding='valid')(x_R_3)
    
    x = concatenate([x_L_1, x_M_2, x_R_4])
    return x

def create_inception_v4(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-v4"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = inception_a_block(x)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = inception_b_block(x)
    x = BatchNormalization()(x)
    x = reduction_b_block(x)
    
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = inception_c_block(x)
    x = BatchNormalization()(x)
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(256)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer=optmz, 
                  metrics=['accuracy'])
    return model 
In [17]:
# Mostly Original # Inception-Res-v2 -> func: create_inception_resnet_v2()
def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs
    
    x_L_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_M_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x_M_1)
    
    x_R_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_M_2, x_R_3])
    x_C_2 = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=160, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=192, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=1152, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=192, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=224, kernel_size=(1, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=256, kernel_size=(3, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=2048, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 192, 224, 256, 384
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 192, 192, 256, 384
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 256, 256, 384, 384
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):   
    x = inputs
    
    x_EL_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_ML_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ML_2 = Conv2D(filters=384, kernel_size=(3, 3), strides=2, padding='valid')(x_ML_1)
    
    x_MR_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_MR_1)
    
    x_ER_1 = Conv2D(filters=256, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER_2 = Conv2D(filters=256, kernel_size=(3, 3), strides=1, padding='same')(x_ER_1)
    x_ER_3 = Conv2D(filters=256, kernel_size=(3, 3), strides=2, padding='valid')(x_ER_2)
    
    x = concatenate([x_EL_1, x_ML_2, x_MR_2, x_ER_3])
    return x

def create_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)
    x = stem_block(inputs)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = inception_resnet_v2_a_block(x, scale=0.1)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_b_block(x, scale=0.1)
    x = inception_resnet_v2_reduction_b_block(x)
    
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = inception_resnet_v2_c_block(x, scale=0.1)
    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(1792)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [18]:
# Modified # Inception-Res-v2 -> func: create_inception_resnet_v2() 1,3,1 & 0.5
def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs
    
    x_L_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_M_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x_M_1)
    
    x_R_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_M_2, x_R_3])
    x_C_2 = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=80, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=96, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=672, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=114, kernel_size=(1, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=1120, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 96, 112, 128, 192
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 96, 96, 128, 192
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 128, 128, 96, 192
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):   
    x = inputs
    
    x_EL_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_ML_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ML_2 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x_ML_1)
    
    x_MR_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR_2 = Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding='valid')(x_MR_1)
    
    x_ER_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER_2 = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same')(x_ER_1)
    x_ER_3 = Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding='valid')(x_ER_2)
    
    x = concatenate([x_EL_1, x_ML_2, x_MR_2, x_ER_3])
    return x

def create_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)
    
    batch_norm = True
    no_a_block = 1
    no_b_block = 3
    no_c_block = 1
    
    x = stem_block(inputs)
    
    for i in range(no_a_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_a_block(x, scale=0.15)
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    
    for i in range(no_b_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_b_block(x, scale=0.15)
    x = BatchNormalization()(x)
    x = inception_resnet_v2_reduction_b_block(x)
    
    for i in range(no_c_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_c_block(x, scale=0.15)
    x = BatchNormalization()(x)

    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(896)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [19]:
# Modified # SE Inception-Res-v2 -> func: create_se_inception_resnet_v2() 1,3,1 & 0.5
def stem_block(inputs,
         names=None):
    x = inputs
    
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=2, padding='valid')(inputs)
    x = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='valid')(x)
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x)
    x_L1_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    x_R1_1 = Conv2D(filters=96, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x = concatenate([x_L1_1, x_R1_1])
    
    x_L2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_L2_2 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_L2_1)
    
    x_R2_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R2_2 = Conv2D(filters=64, kernel_size=(7, 1), strides=1, padding='same')(x_R2_1)
    x_R2_3 = Conv2D(filters=64, kernel_size=(1, 7), strides=1, padding='same')(x_R2_2)
    x_R2_4 = Conv2D(filters=96, kernel_size=(3, 3), strides=1, padding='valid')(x_R2_3)
    x = concatenate([x_L2_2, x_R2_4])
    
    x_L3_1 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x)
    x_L3_2 = ZeroPadding2D(padding=((0,1), (0,1)))(x_L3_1) # Added due to size mismatch
    x_R3_1 = MaxPooling2D(strides=2, padding='valid')(x)
    x = concatenate([x_L3_2, x_R3_1])
    return x

def inception_resnet_v2_a_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs
    
    x_L_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_M_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_M_2 = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(x_M_1)
    
    x_R_1 = Conv2D(filters=32, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=48, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_M_2, x_R_3])
    x_C_2 = Conv2D(filters=384, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_b_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=80, kernel_size=(1, 7), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=96, kernel_size=(7, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=672, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)

    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def inception_resnet_v2_c_block(inputs,
                                scale=0.1,
                                names=None):
    x = inputs

    x_L_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    
    x_R_1 = Conv2D(filters=96, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=114, kernel_size=(1, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=128, kernel_size=(3, 1), strides=1, padding='same')(x_R_2)
    
    x_C_1 = concatenate([x_L_1, x_R_3])
    x_C_2 = Conv2D(filters=1120, kernel_size=(1, 1), padding='same', activation='linear')(x_C_1)
    
    x = Lambda(lambda ipt, scale: ipt[0] + ipt[1] * scale,
                      output_shape=backend.int_shape(x)[1:],
                      arguments={'scale': scale})([x, x_C_2])
    x = Activation(activation='relu')(x)
    return x

def reduction_a_block(inputs,
                      network_selected="Inception-v4",
                      names=None):
    if network_selected == "Inception-v4":
        k, l, m, n = 96, 112, 128, 192
    elif network_selected == "Inception-ResNet-v1":
        k, l, m, n = 96, 96, 128, 192
    elif network_selected == "Inception-ResNet-v2":
        k, l, m, n = 128, 128, 96, 192
        
    x = inputs
    
    x_L_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_M_1 = Conv2D(filters=n, kernel_size=(3, 3), strides=2, padding='valid')(x)
    
    x_R_1 = Conv2D(filters=k, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_R_2 = Conv2D(filters=l, kernel_size=(3, 3), strides=1, padding='same')(x_R_1)
    x_R_3 = Conv2D(filters=m, kernel_size=(3, 3), strides=2, padding='valid')(x_R_2)
    
    x = concatenate([x_L_1, x_M_1, x_R_3])
    return x

def inception_resnet_v2_reduction_b_block(inputs,
                      names=None):   
    x = inputs
    
    x_EL_1 = MaxPooling2D(pool_size=(3, 3), strides=2, padding='valid')(x)
    
    x_ML_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ML_2 = Conv2D(filters=192, kernel_size=(3, 3), strides=2, padding='valid')(x_ML_1)
    
    x_MR_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_MR_2 = Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding='valid')(x_MR_1)
    
    x_ER_1 = Conv2D(filters=128, kernel_size=(1, 1), strides=1, padding='same')(x)
    x_ER_2 = Conv2D(filters=128, kernel_size=(3, 3), strides=1, padding='same')(x_ER_1)
    x_ER_3 = Conv2D(filters=128, kernel_size=(3, 3), strides=2, padding='valid')(x_ER_2)
    
    x = concatenate([x_EL_1, x_ML_2, x_MR_2, x_ER_3])
    return x

def squeeze_excitation_layer(inputs, out_dim, ratio, layer_name):

    x = inputs
    
    x = GlobalAveragePooling2D()(x)
    x = Dense(units=out_dim / ratio, use_bias=True)(x)
    x = Activation(activation='relu')(x)
    x = Dense(units=out_dim, use_bias=True)(x)
    x = Activation(activation='sigmoid')(x)

    x = Reshape([1, 1, out_dim])(x)
    
    scale = Lambda(lambda ipt: ipt[0] * ipt[1] ,
                          output_shape=backend.int_shape(x)[1:])([inputs, x])

    return scale
        
def create_se_inception_resnet_v2(inputShape=(imgrows, imgclms, channel),
                   num_classes=2):
    NETWORK_SELECTED = "Inception-ResNet-v2"
    inputs = Input(shape=inputShape)
    
    batch_norm = True
    no_a_block = 1
    no_b_block = 3
    no_c_block = 1
    reduction_ratio = 4
    
    x = stem_block(inputs)
    
    for i in range(no_a_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_a_block(x, scale=0.15)
        chnl = int(np.shape(x)[-1])
        x = squeeze_excitation_layer(x, out_dim=chnl, ratio=reduction_ratio, layer_name='SE_A'+str(i))
    x = BatchNormalization()(x)
    x = reduction_a_block(x, network_selected=NETWORK_SELECTED)
    chnl = int(np.shape(x)[-1])
    x = squeeze_excitation_layer(x, out_dim=chnl, ratio=reduction_ratio, layer_name='SE_A')
    
    for i in range(no_b_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_b_block(x, scale=0.15)
        chnl = int(np.shape(x)[-1])
        x = squeeze_excitation_layer(x, out_dim=chnl, ratio=reduction_ratio, layer_name='SE_B'+str(i))
    x = BatchNormalization()(x)
    x = inception_resnet_v2_reduction_b_block(x)
    
    for i in range(no_c_block):
        if batch_norm == True:
            x = BatchNormalization()(x)
        x = inception_resnet_v2_c_block(x, scale=0.15)
    x = BatchNormalization()(x)

    x = AveragePooling2D(pool_size=(1,1))(x) # Added (1,1) due to negative dimension
    x = Flatten()(x)
    x = Dense(896)(x)
    x = Dropout(0.2)(x)
    outputs = Dense(num_classes, activation='softmax', 
                    kernel_initializer='he_normal')(x)
    
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy', 
                  optimizer="Adam", 
                  metrics=['accuracy'])
    return model
In [20]:
# Setup the models
model       = create_se_inception_resnet_v2() # This is meant for training
modelGo     = create_se_inception_resnet_v2() # This is used for final testing

model.summary()
WARNING:tensorflow:From D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\initializers.py:104: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with distribution=normal is deprecated and will be removed in a future version.
Instructions for updating:
`normal` is a deprecated alias for `truncated_normal`
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            (None, 96, 96, 3)    0                                            
__________________________________________________________________________________________________
conv2d (Conv2D)                 (None, 47, 47, 32)   896         input_1[0][0]                    
__________________________________________________________________________________________________
conv2d_1 (Conv2D)               (None, 45, 45, 32)   9248        conv2d[0][0]                     
__________________________________________________________________________________________________
conv2d_2 (Conv2D)               (None, 45, 45, 64)   18496       conv2d_1[0][0]                   
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D)    (None, 22, 22, 64)   0           conv2d_2[0][0]                   
__________________________________________________________________________________________________
conv2d_3 (Conv2D)               (None, 22, 22, 96)   55392       conv2d_2[0][0]                   
__________________________________________________________________________________________________
concatenate (Concatenate)       (None, 22, 22, 160)  0           max_pooling2d[0][0]              
                                                                 conv2d_3[0][0]                   
__________________________________________________________________________________________________
conv2d_6 (Conv2D)               (None, 22, 22, 64)   10304       concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_7 (Conv2D)               (None, 22, 22, 64)   28736       conv2d_6[0][0]                   
__________________________________________________________________________________________________
conv2d_4 (Conv2D)               (None, 22, 22, 64)   10304       concatenate[0][0]                
__________________________________________________________________________________________________
conv2d_8 (Conv2D)               (None, 22, 22, 64)   28736       conv2d_7[0][0]                   
__________________________________________________________________________________________________
conv2d_5 (Conv2D)               (None, 20, 20, 96)   55392       conv2d_4[0][0]                   
__________________________________________________________________________________________________
conv2d_9 (Conv2D)               (None, 20, 20, 96)   55392       conv2d_8[0][0]                   
__________________________________________________________________________________________________
concatenate_1 (Concatenate)     (None, 20, 20, 192)  0           conv2d_5[0][0]                   
                                                                 conv2d_9[0][0]                   
__________________________________________________________________________________________________
conv2d_10 (Conv2D)              (None, 9, 9, 192)    331968      concatenate_1[0][0]              
__________________________________________________________________________________________________
zero_padding2d (ZeroPadding2D)  (None, 10, 10, 192)  0           conv2d_10[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D)  (None, 10, 10, 192)  0           concatenate_1[0][0]              
__________________________________________________________________________________________________
concatenate_2 (Concatenate)     (None, 10, 10, 384)  0           zero_padding2d[0][0]             
                                                                 max_pooling2d_1[0][0]            
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 10, 10, 384)  1536        concatenate_2[0][0]              
__________________________________________________________________________________________________
conv2d_14 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_12 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_15 (Conv2D)              (None, 10, 10, 48)   13872       conv2d_14[0][0]                  
__________________________________________________________________________________________________
conv2d_11 (Conv2D)              (None, 10, 10, 32)   12320       batch_normalization[0][0]        
__________________________________________________________________________________________________
conv2d_13 (Conv2D)              (None, 10, 10, 32)   9248        conv2d_12[0][0]                  
__________________________________________________________________________________________________
conv2d_16 (Conv2D)              (None, 10, 10, 64)   27712       conv2d_15[0][0]                  
__________________________________________________________________________________________________
concatenate_3 (Concatenate)     (None, 10, 10, 128)  0           conv2d_11[0][0]                  
                                                                 conv2d_13[0][0]                  
                                                                 conv2d_16[0][0]                  
__________________________________________________________________________________________________
conv2d_17 (Conv2D)              (None, 10, 10, 384)  49536       concatenate_3[0][0]              
__________________________________________________________________________________________________
lambda (Lambda)                 (None, 10, 10, 384)  0           batch_normalization[0][0]        
                                                                 conv2d_17[0][0]                  
__________________________________________________________________________________________________
activation (Activation)         (None, 10, 10, 384)  0           lambda[0][0]                     
__________________________________________________________________________________________________
global_average_pooling2d (Globa (None, 384)          0           activation[0][0]                 
__________________________________________________________________________________________________
dense (Dense)                   (None, 96)           36960       global_average_pooling2d[0][0]   
__________________________________________________________________________________________________
activation_1 (Activation)       (None, 96)           0           dense[0][0]                      
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 384)          37248       activation_1[0][0]               
__________________________________________________________________________________________________
activation_2 (Activation)       (None, 384)          0           dense_1[0][0]                    
__________________________________________________________________________________________________
reshape (Reshape)               (None, 1, 1, 384)    0           activation_2[0][0]               
__________________________________________________________________________________________________
lambda_1 (Lambda)               (None, 10, 10, 384)  0           activation[0][0]                 
                                                                 reshape[0][0]                    
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 10, 10, 384)  1536        lambda_1[0][0]                   
__________________________________________________________________________________________________
conv2d_19 (Conv2D)              (None, 10, 10, 128)  49280       batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_20 (Conv2D)              (None, 10, 10, 128)  147584      conv2d_19[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D)  (None, 4, 4, 384)    0           batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_18 (Conv2D)              (None, 4, 4, 192)    663744      batch_normalization_1[0][0]      
__________________________________________________________________________________________________
conv2d_21 (Conv2D)              (None, 4, 4, 96)     110688      conv2d_20[0][0]                  
__________________________________________________________________________________________________
concatenate_4 (Concatenate)     (None, 4, 4, 672)    0           max_pooling2d_2[0][0]            
                                                                 conv2d_18[0][0]                  
                                                                 conv2d_21[0][0]                  
__________________________________________________________________________________________________
global_average_pooling2d_1 (Glo (None, 672)          0           concatenate_4[0][0]              
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 168)          113064      global_average_pooling2d_1[0][0] 
__________________________________________________________________________________________________
activation_3 (Activation)       (None, 168)          0           dense_2[0][0]                    
__________________________________________________________________________________________________
dense_3 (Dense)                 (None, 672)          113568      activation_3[0][0]               
__________________________________________________________________________________________________
activation_4 (Activation)       (None, 672)          0           dense_3[0][0]                    
__________________________________________________________________________________________________
reshape_1 (Reshape)             (None, 1, 1, 672)    0           activation_4[0][0]               
__________________________________________________________________________________________________
lambda_2 (Lambda)               (None, 4, 4, 672)    0           concatenate_4[0][0]              
                                                                 reshape_1[0][0]                  
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 4, 4, 672)    2688        lambda_2[0][0]                   
__________________________________________________________________________________________________
conv2d_23 (Conv2D)              (None, 4, 4, 64)     43072       batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_24 (Conv2D)              (None, 4, 4, 80)     35920       conv2d_23[0][0]                  
__________________________________________________________________________________________________
conv2d_22 (Conv2D)              (None, 4, 4, 96)     64608       batch_normalization_2[0][0]      
__________________________________________________________________________________________________
conv2d_25 (Conv2D)              (None, 4, 4, 96)     53856       conv2d_24[0][0]                  
__________________________________________________________________________________________________
concatenate_5 (Concatenate)     (None, 4, 4, 192)    0           conv2d_22[0][0]                  
                                                                 conv2d_25[0][0]                  
__________________________________________________________________________________________________
conv2d_26 (Conv2D)              (None, 4, 4, 672)    129696      concatenate_5[0][0]              
__________________________________________________________________________________________________
lambda_3 (Lambda)               (None, 4, 4, 672)    0           batch_normalization_2[0][0]      
                                                                 conv2d_26[0][0]                  
__________________________________________________________________________________________________
activation_5 (Activation)       (None, 4, 4, 672)    0           lambda_3[0][0]                   
__________________________________________________________________________________________________
global_average_pooling2d_2 (Glo (None, 672)          0           activation_5[0][0]               
__________________________________________________________________________________________________
dense_4 (Dense)                 (None, 168)          113064      global_average_pooling2d_2[0][0] 
__________________________________________________________________________________________________
activation_6 (Activation)       (None, 168)          0           dense_4[0][0]                    
__________________________________________________________________________________________________
dense_5 (Dense)                 (None, 672)          113568      activation_6[0][0]               
__________________________________________________________________________________________________
activation_7 (Activation)       (None, 672)          0           dense_5[0][0]                    
__________________________________________________________________________________________________
reshape_2 (Reshape)             (None, 1, 1, 672)    0           activation_7[0][0]               
__________________________________________________________________________________________________
lambda_4 (Lambda)               (None, 4, 4, 672)    0           activation_5[0][0]               
                                                                 reshape_2[0][0]                  
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 4, 4, 672)    2688        lambda_4[0][0]                   
__________________________________________________________________________________________________
conv2d_28 (Conv2D)              (None, 4, 4, 64)     43072       batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_29 (Conv2D)              (None, 4, 4, 80)     35920       conv2d_28[0][0]                  
__________________________________________________________________________________________________
conv2d_27 (Conv2D)              (None, 4, 4, 96)     64608       batch_normalization_3[0][0]      
__________________________________________________________________________________________________
conv2d_30 (Conv2D)              (None, 4, 4, 96)     53856       conv2d_29[0][0]                  
__________________________________________________________________________________________________
concatenate_6 (Concatenate)     (None, 4, 4, 192)    0           conv2d_27[0][0]                  
                                                                 conv2d_30[0][0]                  
__________________________________________________________________________________________________
conv2d_31 (Conv2D)              (None, 4, 4, 672)    129696      concatenate_6[0][0]              
__________________________________________________________________________________________________
lambda_5 (Lambda)               (None, 4, 4, 672)    0           batch_normalization_3[0][0]      
                                                                 conv2d_31[0][0]                  
__________________________________________________________________________________________________
activation_8 (Activation)       (None, 4, 4, 672)    0           lambda_5[0][0]                   
__________________________________________________________________________________________________
global_average_pooling2d_3 (Glo (None, 672)          0           activation_8[0][0]               
__________________________________________________________________________________________________
dense_6 (Dense)                 (None, 168)          113064      global_average_pooling2d_3[0][0] 
__________________________________________________________________________________________________
activation_9 (Activation)       (None, 168)          0           dense_6[0][0]                    
__________________________________________________________________________________________________
dense_7 (Dense)                 (None, 672)          113568      activation_9[0][0]               
__________________________________________________________________________________________________
activation_10 (Activation)      (None, 672)          0           dense_7[0][0]                    
__________________________________________________________________________________________________
reshape_3 (Reshape)             (None, 1, 1, 672)    0           activation_10[0][0]              
__________________________________________________________________________________________________
lambda_6 (Lambda)               (None, 4, 4, 672)    0           activation_8[0][0]               
                                                                 reshape_3[0][0]                  
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 4, 4, 672)    2688        lambda_6[0][0]                   
__________________________________________________________________________________________________
conv2d_33 (Conv2D)              (None, 4, 4, 64)     43072       batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_34 (Conv2D)              (None, 4, 4, 80)     35920       conv2d_33[0][0]                  
__________________________________________________________________________________________________
conv2d_32 (Conv2D)              (None, 4, 4, 96)     64608       batch_normalization_4[0][0]      
__________________________________________________________________________________________________
conv2d_35 (Conv2D)              (None, 4, 4, 96)     53856       conv2d_34[0][0]                  
__________________________________________________________________________________________________
concatenate_7 (Concatenate)     (None, 4, 4, 192)    0           conv2d_32[0][0]                  
                                                                 conv2d_35[0][0]                  
__________________________________________________________________________________________________
conv2d_36 (Conv2D)              (None, 4, 4, 672)    129696      concatenate_7[0][0]              
__________________________________________________________________________________________________
lambda_7 (Lambda)               (None, 4, 4, 672)    0           batch_normalization_4[0][0]      
                                                                 conv2d_36[0][0]                  
__________________________________________________________________________________________________
activation_11 (Activation)      (None, 4, 4, 672)    0           lambda_7[0][0]                   
__________________________________________________________________________________________________
global_average_pooling2d_4 (Glo (None, 672)          0           activation_11[0][0]              
__________________________________________________________________________________________________
dense_8 (Dense)                 (None, 168)          113064      global_average_pooling2d_4[0][0] 
__________________________________________________________________________________________________
activation_12 (Activation)      (None, 168)          0           dense_8[0][0]                    
__________________________________________________________________________________________________
dense_9 (Dense)                 (None, 672)          113568      activation_12[0][0]              
__________________________________________________________________________________________________
activation_13 (Activation)      (None, 672)          0           dense_9[0][0]                    
__________________________________________________________________________________________________
reshape_4 (Reshape)             (None, 1, 1, 672)    0           activation_13[0][0]              
__________________________________________________________________________________________________
lambda_8 (Lambda)               (None, 4, 4, 672)    0           activation_11[0][0]              
                                                                 reshape_4[0][0]                  
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 4, 4, 672)    2688        lambda_8[0][0]                   
__________________________________________________________________________________________________
conv2d_41 (Conv2D)              (None, 4, 4, 128)    86144       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_37 (Conv2D)              (None, 4, 4, 128)    86144       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_39 (Conv2D)              (None, 4, 4, 128)    86144       batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_42 (Conv2D)              (None, 4, 4, 128)    147584      conv2d_41[0][0]                  
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D)  (None, 1, 1, 672)    0           batch_normalization_5[0][0]      
__________________________________________________________________________________________________
conv2d_38 (Conv2D)              (None, 1, 1, 192)    221376      conv2d_37[0][0]                  
__________________________________________________________________________________________________
conv2d_40 (Conv2D)              (None, 1, 1, 128)    147584      conv2d_39[0][0]                  
__________________________________________________________________________________________________
conv2d_43 (Conv2D)              (None, 1, 1, 128)    147584      conv2d_42[0][0]                  
__________________________________________________________________________________________________
concatenate_8 (Concatenate)     (None, 1, 1, 1120)   0           max_pooling2d_3[0][0]            
                                                                 conv2d_38[0][0]                  
                                                                 conv2d_40[0][0]                  
                                                                 conv2d_43[0][0]                  
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 1, 1, 1120)   4480        concatenate_8[0][0]              
__________________________________________________________________________________________________
conv2d_45 (Conv2D)              (None, 1, 1, 96)     107616      batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_46 (Conv2D)              (None, 1, 1, 114)    32946       conv2d_45[0][0]                  
__________________________________________________________________________________________________
conv2d_44 (Conv2D)              (None, 1, 1, 96)     107616      batch_normalization_6[0][0]      
__________________________________________________________________________________________________
conv2d_47 (Conv2D)              (None, 1, 1, 128)    43904       conv2d_46[0][0]                  
__________________________________________________________________________________________________
concatenate_9 (Concatenate)     (None, 1, 1, 224)    0           conv2d_44[0][0]                  
                                                                 conv2d_47[0][0]                  
__________________________________________________________________________________________________
conv2d_48 (Conv2D)              (None, 1, 1, 1120)   252000      concatenate_9[0][0]              
__________________________________________________________________________________________________
lambda_9 (Lambda)               (None, 1, 1, 1120)   0           batch_normalization_6[0][0]      
                                                                 conv2d_48[0][0]                  
__________________________________________________________________________________________________
activation_14 (Activation)      (None, 1, 1, 1120)   0           lambda_9[0][0]                   
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 1, 1, 1120)   4480        activation_14[0][0]              
__________________________________________________________________________________________________
average_pooling2d (AveragePooli (None, 1, 1, 1120)   0           batch_normalization_7[0][0]      
__________________________________________________________________________________________________
flatten (Flatten)               (None, 1120)         0           average_pooling2d[0][0]          
__________________________________________________________________________________________________
dense_10 (Dense)                (None, 896)          1004416     flatten[0][0]                    
__________________________________________________________________________________________________
dropout (Dropout)               (None, 896)          0           dense_10[0][0]                   
__________________________________________________________________________________________________
dense_11 (Dense)                (None, 2)            1794        dropout[0][0]                    
==================================================================================================
Total params: 6,171,316
Trainable params: 6,159,924
Non-trainable params: 11,392
__________________________________________________________________________________________________
In [31]:
continue_training = True

if continue_training == True:
    best_model_filepath = modelname + "_SEInceptionResNetV2best9107.hdf5"
    model.load_weights(best_model_filepath)
In [32]:
# Create checkpoint for the training
# This checkpoint performs model saving when
# an epoch gives highest testing accuracy
# filepath        = modelname + ".hdf5"
# checkpoint      = ModelCheckpoint(filepath, 
#                                   monitor='val_acc', 
#                                   verbose=0, 
#                                   save_best_only=True, 
#                                   mode='max')

#                             # Log the epoch detail into csv
# csv_logger      = CSVLogger(modelname +'.csv')
# callbacks_list  = [checkpoint, csv_logger]

def lrSchedule(epoch):
    lr  = 1e-3
    
    if epoch > 270: #190
        lr  *= 0.5e-3
        
    elif epoch > 240: #160
        lr  *= 1e-3
        
    elif epoch > 200: #140
        lr  *= 1e-2
        
    elif epoch > 150: #100
        lr  *= 1e-1
        
    print('Learning rate: ', lr)
    
    return lr

LRScheduler     = LearningRateScheduler(lrSchedule)

                            # Create checkpoint for the training
                            # This checkpoint performs model saving when
                            # an epoch gives highest testing accuracy
filepath        = modelname + ".hdf5"
checkpoint      = ModelCheckpoint(filepath, 
                                  monitor='val_acc', 
                                  verbose=0, 
                                  save_best_only=True, 
                                  mode='max')

                            # Log the epoch detail into csv
csv_logger      = CSVLogger(modelname +'.csv')
callbacks_list  = [checkpoint, csv_logger, LRScheduler]
In [33]:
# Fit the model
# This is where the training starts
# model.fit(trDat, 
#           trLbl, 
#           validation_data=(tsDat, tsLbl), 
#           epochs=60, 
#           batch_size=32,
#           callbacks=callbacks_list)

datagen = ImageDataGenerator(width_shift_range=0.25,
                             height_shift_range=0.25,
                             rotation_range=45,
                             zoom_range=0.8,
                             #zca_epsilon=1e-6,
                             #zca_whitening=True,
                             fill_mode='nearest',
                             horizontal_flip=True,
                             vertical_flip=False)

model.fit_generator(datagen.flow(trDat, trLbl, batch_size=16),
                    validation_data=(tsDat, tsLbl),
                    epochs=300, #300 
                    verbose=1,
                    steps_per_epoch=len(trDat)/16,
                    callbacks=callbacks_list)
Learning rate:  0.001
Epoch 1/300
924/924 [==============================] - 71s 77ms/step - loss: 0.3153 - acc: 0.8709 - val_loss: 0.2465 - val_acc: 0.9034
Learning rate:  0.001
Epoch 2/300
924/924 [==============================] - 73s 80ms/step - loss: 0.3158 - acc: 0.8699 - val_loss: 0.2901 - val_acc: 0.8739
Learning rate:  0.001
Epoch 3/300
924/924 [==============================] - 71s 77ms/step - loss: 0.3131 - acc: 0.8678 - val_loss: 0.2535 - val_acc: 0.9021
Learning rate:  0.001
Epoch 4/300
924/924 [==============================] - 73s 79ms/step - loss: 0.3026 - acc: 0.8755 - val_loss: 0.2477 - val_acc: 0.9021
Learning rate:  0.001
Epoch 5/300
924/924 [==============================] - 73s 79ms/step - loss: 0.3094 - acc: 0.8752 - val_loss: 0.2470 - val_acc: 0.9077
Learning rate:  0.001
Epoch 6/300
924/924 [==============================] - 75s 81ms/step - loss: 0.3151 - acc: 0.8726 - val_loss: 0.2410 - val_acc: 0.9091
Learning rate:  0.001
Epoch 7/300
924/924 [==============================] - 74s 80ms/step - loss: 0.3064 - acc: 0.8750 - val_loss: 0.2550 - val_acc: 0.9023
Learning rate:  0.001
Epoch 8/300
924/924 [==============================] - 71s 77ms/step - loss: 0.3070 - acc: 0.8710 - val_loss: 0.2507 - val_acc: 0.9029
Learning rate:  0.001
Epoch 9/300
924/924 [==============================] - 71s 77ms/step - loss: 0.3072 - acc: 0.8734 - val_loss: 0.2554 - val_acc: 0.9048
Learning rate:  0.001
Epoch 10/300
924/924 [==============================] - 72s 78ms/step - loss: 0.3032 - acc: 0.8755 - val_loss: 0.2431 - val_acc: 0.9083
Learning rate:  0.001
Epoch 11/300
924/924 [==============================] - 71s 77ms/step - loss: 0.3051 - acc: 0.8790 - val_loss: 0.3000 - val_acc: 0.8893
Learning rate:  0.001
Epoch 12/300
924/924 [==============================] - 76s 82ms/step - loss: 0.3046 - acc: 0.8747 - val_loss: 0.2979 - val_acc: 0.8842
Learning rate:  0.001
Epoch 13/300
924/924 [==============================] - 81s 87ms/step - loss: 0.3079 - acc: 0.8784 - val_loss: 0.2669 - val_acc: 0.9015
Learning rate:  0.001
Epoch 14/300
924/924 [==============================] - 79s 85ms/step - loss: 0.3031 - acc: 0.8762 - val_loss: 0.3836 - val_acc: 0.8929
Learning rate:  0.001
Epoch 15/300
924/924 [==============================] - 95s 103ms/step - loss: 0.2959 - acc: 0.8788 - val_loss: 0.2626 - val_acc: 0.8980
Learning rate:  0.001
Epoch 16/300
924/924 [==============================] - 97s 105ms/step - loss: 0.3012 - acc: 0.8778 - val_loss: 0.2402 - val_acc: 0.9083
Learning rate:  0.001
Epoch 17/300
924/924 [==============================] - 93s 100ms/step - loss: 0.2985 - acc: 0.8799 - val_loss: 0.2450 - val_acc: 0.9007
Learning rate:  0.001
Epoch 18/300
924/924 [==============================] - 85s 92ms/step - loss: 0.3086 - acc: 0.8747 - val_loss: 0.2331 - val_acc: 0.9091
Learning rate:  0.001
Epoch 19/300
924/924 [==============================] - 92s 100ms/step - loss: 0.3022 - acc: 0.8756 - val_loss: 0.2574 - val_acc: 0.9072
Learning rate:  0.001
Epoch 20/300
924/924 [==============================] - 88s 95ms/step - loss: 0.3029 - acc: 0.8765 - val_loss: 0.2840 - val_acc: 0.8896
Learning rate:  0.001
Epoch 21/300
924/924 [==============================] - 76s 82ms/step - loss: 0.3017 - acc: 0.8780 - val_loss: 0.2878 - val_acc: 0.9056
Learning rate:  0.001
Epoch 22/300
924/924 [==============================] - 74s 80ms/step - loss: 0.3078 - acc: 0.8770 - val_loss: 0.2335 - val_acc: 0.9094
Learning rate:  0.001
Epoch 23/300
924/924 [==============================] - 84s 91ms/step - loss: 0.2951 - acc: 0.8797 - val_loss: 0.2378 - val_acc: 0.9069
Learning rate:  0.001
Epoch 24/300
924/924 [==============================] - 80s 86ms/step - loss: 0.2894 - acc: 0.8830 - val_loss: 0.2541 - val_acc: 0.9085
Learning rate:  0.001
Epoch 25/300
924/924 [==============================] - 83s 90ms/step - loss: 0.2976 - acc: 0.8780 - val_loss: 0.2717 - val_acc: 0.8953
Learning rate:  0.001
Epoch 26/300
924/924 [==============================] - 83s 89ms/step - loss: 0.2921 - acc: 0.8795 - val_loss: 0.2401 - val_acc: 0.9096
Learning rate:  0.001
Epoch 27/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2970 - acc: 0.8821 - val_loss: 0.2315 - val_acc: 0.9102
Learning rate:  0.001
Epoch 28/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2939 - acc: 0.8799 - val_loss: 0.2343 - val_acc: 0.9104
Learning rate:  0.001
Epoch 29/300
924/924 [==============================] - 77s 83ms/step - loss: 0.2924 - acc: 0.8804 - val_loss: 0.2486 - val_acc: 0.9129
Learning rate:  0.001
Epoch 30/300
924/924 [==============================] - 80s 86ms/step - loss: 0.2945 - acc: 0.8816 - val_loss: 0.2644 - val_acc: 0.8961
Learning rate:  0.001
Epoch 31/300
924/924 [==============================] - 88s 96ms/step - loss: 0.2972 - acc: 0.8790 - val_loss: 0.2839 - val_acc: 0.8939
Learning rate:  0.001
Epoch 32/300
924/924 [==============================] - 82s 88ms/step - loss: 0.2859 - acc: 0.8828 - val_loss: 0.2421 - val_acc: 0.8977
Learning rate:  0.001
Epoch 33/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2884 - acc: 0.8841 - val_loss: 0.2702 - val_acc: 0.9018
Learning rate:  0.001
Epoch 34/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2892 - acc: 0.8833 - val_loss: 0.2261 - val_acc: 0.9142
Learning rate:  0.001
Epoch 35/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2911 - acc: 0.8830 - val_loss: 0.2525 - val_acc: 0.9107
Learning rate:  0.001
Epoch 36/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2921 - acc: 0.8823 - val_loss: 0.3270 - val_acc: 0.8866
Learning rate:  0.001
Epoch 37/300
924/924 [==============================] - 72s 77ms/step - loss: 0.2936 - acc: 0.8810 - val_loss: 0.3293 - val_acc: 0.8761
Learning rate:  0.001
Epoch 38/300
924/924 [==============================] - 77s 83ms/step - loss: 0.2945 - acc: 0.8816 - val_loss: 0.2294 - val_acc: 0.9167
Learning rate:  0.001
Epoch 39/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2965 - acc: 0.8804 - val_loss: 0.2302 - val_acc: 0.9102
Learning rate:  0.001
Epoch 40/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2891 - acc: 0.8806 - val_loss: 0.2552 - val_acc: 0.9015
Learning rate:  0.001
Epoch 41/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2913 - acc: 0.8835 - val_loss: 0.2342 - val_acc: 0.9088
Learning rate:  0.001
Epoch 42/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2834 - acc: 0.8871 - val_loss: 0.2305 - val_acc: 0.9107
Learning rate:  0.001
Epoch 43/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2875 - acc: 0.8843 - val_loss: 0.2443 - val_acc: 0.9056
Learning rate:  0.001
Epoch 44/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2852 - acc: 0.8888 - val_loss: 0.2545 - val_acc: 0.9018
Learning rate:  0.001
Epoch 45/300
924/924 [==============================] - 75s 82ms/step - loss: 0.2887 - acc: 0.8857 - val_loss: 0.2577 - val_acc: 0.9094
Learning rate:  0.001
Epoch 46/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2857 - acc: 0.8852 - val_loss: 0.2600 - val_acc: 0.9040
Learning rate:  0.001
Epoch 47/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2887 - acc: 0.8843 - val_loss: 0.2357 - val_acc: 0.9083
Learning rate:  0.001
Epoch 48/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2962 - acc: 0.8802 - val_loss: 0.2465 - val_acc: 0.9080
Learning rate:  0.001
Epoch 49/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2798 - acc: 0.8884 - val_loss: 0.2112 - val_acc: 0.9140
Learning rate:  0.001
Epoch 50/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2780 - acc: 0.8864 - val_loss: 0.2217 - val_acc: 0.9137
Learning rate:  0.001
Epoch 51/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2865 - acc: 0.8855 - val_loss: 0.2326 - val_acc: 0.9026
Learning rate:  0.001
Epoch 52/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2799 - acc: 0.8835 - val_loss: 0.2207 - val_acc: 0.9148
Learning rate:  0.001
Epoch 53/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2827 - acc: 0.8867 - val_loss: 0.2678 - val_acc: 0.8929
Learning rate:  0.001
Epoch 54/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2833 - acc: 0.8874 - val_loss: 0.2320 - val_acc: 0.9107
Learning rate:  0.001
Epoch 55/300
924/924 [==============================] - 71s 77ms/step - loss: 0.3016 - acc: 0.8771 - val_loss: 0.3371 - val_acc: 0.8590
Learning rate:  0.001
Epoch 56/300
924/924 [==============================] - 71s 77ms/step - loss: 0.3562 - acc: 0.8454 - val_loss: 0.2934 - val_acc: 0.8764
Learning rate:  0.001
Epoch 57/300
924/924 [==============================] - 73s 79ms/step - loss: 0.3157 - acc: 0.8689 - val_loss: 0.2465 - val_acc: 0.9031
Learning rate:  0.001
Epoch 58/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2912 - acc: 0.8830 - val_loss: 0.2304 - val_acc: 0.9107
Learning rate:  0.001
Epoch 59/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2858 - acc: 0.8863 - val_loss: 0.2575 - val_acc: 0.9034
Learning rate:  0.001
Epoch 60/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2798 - acc: 0.8881 - val_loss: 0.2237 - val_acc: 0.9134
Learning rate:  0.001
Epoch 61/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2769 - acc: 0.8901 - val_loss: 0.2189 - val_acc: 0.9156
Learning rate:  0.001
Epoch 62/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2971 - acc: 0.8793 - val_loss: 0.2249 - val_acc: 0.9183
Learning rate:  0.001
Epoch 63/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2894 - acc: 0.8824 - val_loss: 0.2493 - val_acc: 0.9034
Learning rate:  0.001
Epoch 64/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2913 - acc: 0.8822 - val_loss: 0.2358 - val_acc: 0.9056
Learning rate:  0.001
Epoch 65/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2793 - acc: 0.8854 - val_loss: 0.2314 - val_acc: 0.9145
Learning rate:  0.001
Epoch 66/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2877 - acc: 0.8853 - val_loss: 0.2230 - val_acc: 0.9142
Learning rate:  0.001
Epoch 67/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2809 - acc: 0.8905 - val_loss: 0.2307 - val_acc: 0.9169
Learning rate:  0.001
Epoch 68/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2820 - acc: 0.8866 - val_loss: 0.2241 - val_acc: 0.9110
Learning rate:  0.001
Epoch 69/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2893 - acc: 0.8869 - val_loss: 0.2179 - val_acc: 0.9161
Learning rate:  0.001
Epoch 70/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2837 - acc: 0.8879 - val_loss: 0.2457 - val_acc: 0.9118
Learning rate:  0.001
Epoch 71/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2915 - acc: 0.8843 - val_loss: 0.2251 - val_acc: 0.9153
Learning rate:  0.001
Epoch 72/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2734 - acc: 0.8918 - val_loss: 0.2227 - val_acc: 0.9148
Learning rate:  0.001
Epoch 73/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2777 - acc: 0.8906 - val_loss: 0.2345 - val_acc: 0.9053
Learning rate:  0.001
Epoch 74/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2763 - acc: 0.8911 - val_loss: 0.2310 - val_acc: 0.9194
Learning rate:  0.001
Epoch 75/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2774 - acc: 0.8886 - val_loss: 0.2224 - val_acc: 0.9156
Learning rate:  0.001
Epoch 76/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2785 - acc: 0.8912 - val_loss: 0.2281 - val_acc: 0.9153
Learning rate:  0.001
Epoch 77/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2730 - acc: 0.8937 - val_loss: 0.2525 - val_acc: 0.9072
Learning rate:  0.001
Epoch 78/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2878 - acc: 0.8839 - val_loss: 0.2571 - val_acc: 0.8948
Learning rate:  0.001
Epoch 79/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2787 - acc: 0.8879 - val_loss: 0.2188 - val_acc: 0.9150
Learning rate:  0.001
Epoch 80/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2754 - acc: 0.8947 - val_loss: 0.2218 - val_acc: 0.9142
Learning rate:  0.001
Epoch 81/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2749 - acc: 0.8893 - val_loss: 0.2785 - val_acc: 0.8828
Learning rate:  0.001
Epoch 82/300
924/924 [==============================] - 70s 76ms/step - loss: 0.2752 - acc: 0.8915 - val_loss: 0.2476 - val_acc: 0.9021
Learning rate:  0.001
Epoch 83/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2711 - acc: 0.8930 - val_loss: 0.2829 - val_acc: 0.8839
Learning rate:  0.001
Epoch 84/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2751 - acc: 0.8912 - val_loss: 0.2153 - val_acc: 0.9167
Learning rate:  0.001
Epoch 85/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2741 - acc: 0.8898 - val_loss: 0.2131 - val_acc: 0.9180
Learning rate:  0.001
Epoch 86/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2738 - acc: 0.8938 - val_loss: 0.2315 - val_acc: 0.9123
Learning rate:  0.001
Epoch 87/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2718 - acc: 0.8918 - val_loss: 0.2559 - val_acc: 0.9091
Learning rate:  0.001
Epoch 88/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2687 - acc: 0.8931 - val_loss: 0.2346 - val_acc: 0.9075
Learning rate:  0.001
Epoch 89/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2669 - acc: 0.8952 - val_loss: 0.2254 - val_acc: 0.9175
Learning rate:  0.001
Epoch 90/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2711 - acc: 0.8943 - val_loss: 0.2145 - val_acc: 0.9188
Learning rate:  0.001
Epoch 91/300
924/924 [==============================] - 77s 83ms/step - loss: 0.2737 - acc: 0.8912 - val_loss: 0.2203 - val_acc: 0.9142
Learning rate:  0.001
Epoch 92/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2725 - acc: 0.8928 - val_loss: 0.2408 - val_acc: 0.9069
Learning rate:  0.001
Epoch 93/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2762 - acc: 0.8886 - val_loss: 0.2106 - val_acc: 0.9213
Learning rate:  0.001
Epoch 94/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2977 - acc: 0.8787 - val_loss: 0.2229 - val_acc: 0.9123
Learning rate:  0.001
Epoch 95/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2815 - acc: 0.8887 - val_loss: 0.2695 - val_acc: 0.9142
Learning rate:  0.001
Epoch 96/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2790 - acc: 0.8909 - val_loss: 0.2455 - val_acc: 0.9069
Learning rate:  0.001
Epoch 97/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2810 - acc: 0.8863 - val_loss: 0.2835 - val_acc: 0.8818
Learning rate:  0.001
Epoch 98/300
924/924 [==============================] - 73s 79ms/step - loss: 0.3096 - acc: 0.8705 - val_loss: 0.2288 - val_acc: 0.9080
Learning rate:  0.001
Epoch 99/300
924/924 [==============================] - 74s 80ms/step - loss: 0.3195 - acc: 0.8695 - val_loss: 0.2570 - val_acc: 0.8996
Learning rate:  0.001
Epoch 100/300
924/924 [==============================] - 74s 80ms/step - loss: 0.3053 - acc: 0.8738 - val_loss: 0.2446 - val_acc: 0.8980
Learning rate:  0.001
Epoch 101/300
924/924 [==============================] - 73s 80ms/step - loss: 0.3000 - acc: 0.8809 - val_loss: 0.2369 - val_acc: 0.9067
Learning rate:  0.001
Epoch 102/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2861 - acc: 0.8881 - val_loss: 0.2346 - val_acc: 0.9104
Learning rate:  0.001
Epoch 103/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2815 - acc: 0.8877 - val_loss: 0.2113 - val_acc: 0.9180
Learning rate:  0.001
Epoch 104/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2776 - acc: 0.8912 - val_loss: 0.2311 - val_acc: 0.9102
Learning rate:  0.001
Epoch 105/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2810 - acc: 0.8899 - val_loss: 0.2213 - val_acc: 0.9145
Learning rate:  0.001
Epoch 106/300
924/924 [==============================] - 73s 80ms/step - loss: 0.2732 - acc: 0.8922 - val_loss: 0.2056 - val_acc: 0.9221
Learning rate:  0.001
Epoch 107/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2710 - acc: 0.8952 - val_loss: 0.2039 - val_acc: 0.9218
Learning rate:  0.001
Epoch 108/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2656 - acc: 0.8939 - val_loss: 0.2269 - val_acc: 0.9202
Learning rate:  0.001
Epoch 109/300
924/924 [==============================] - 74s 81ms/step - loss: 0.2839 - acc: 0.8847 - val_loss: 0.2128 - val_acc: 0.9153
Learning rate:  0.001
Epoch 110/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2881 - acc: 0.8835 - val_loss: 0.2518 - val_acc: 0.9080
Learning rate:  0.001
Epoch 111/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2792 - acc: 0.8865 - val_loss: 0.2531 - val_acc: 0.8953
Learning rate:  0.001
Epoch 112/300
924/924 [==============================] - 73s 78ms/step - loss: 0.2652 - acc: 0.8966 - val_loss: 0.2209 - val_acc: 0.9161
Learning rate:  0.001
Epoch 113/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2731 - acc: 0.8927 - val_loss: 0.2323 - val_acc: 0.9102
Learning rate:  0.001
Epoch 114/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2660 - acc: 0.8939 - val_loss: 0.2045 - val_acc: 0.9169
Learning rate:  0.001
Epoch 115/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2668 - acc: 0.8936 - val_loss: 0.1990 - val_acc: 0.9221
Learning rate:  0.001
Epoch 116/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2737 - acc: 0.8908 - val_loss: 0.2087 - val_acc: 0.9191
Learning rate:  0.001
Epoch 117/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2660 - acc: 0.8963 - val_loss: 0.2041 - val_acc: 0.9240
Learning rate:  0.001
Epoch 118/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2701 - acc: 0.8946 - val_loss: 0.2322 - val_acc: 0.9123
Learning rate:  0.001
Epoch 119/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2715 - acc: 0.8950 - val_loss: 0.2624 - val_acc: 0.8966
Learning rate:  0.001
Epoch 120/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2664 - acc: 0.8943 - val_loss: 0.2058 - val_acc: 0.9194
Learning rate:  0.001
Epoch 121/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2645 - acc: 0.8945 - val_loss: 0.1964 - val_acc: 0.9245
Learning rate:  0.001
Epoch 122/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2598 - acc: 0.8974 - val_loss: 0.2038 - val_acc: 0.9205
Learning rate:  0.001
Epoch 123/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2657 - acc: 0.8939 - val_loss: 0.2420 - val_acc: 0.9034
Learning rate:  0.001
Epoch 124/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2733 - acc: 0.8935 - val_loss: 0.2062 - val_acc: 0.9210
Learning rate:  0.001
Epoch 125/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2660 - acc: 0.8945 - val_loss: 0.2211 - val_acc: 0.9161
Learning rate:  0.001
Epoch 126/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2608 - acc: 0.8974 - val_loss: 0.2049 - val_acc: 0.9226
Learning rate:  0.001
Epoch 127/300
924/924 [==============================] - 76s 83ms/step - loss: 0.2608 - acc: 0.8981 - val_loss: 0.2114 - val_acc: 0.9194
Learning rate:  0.001
Epoch 128/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2638 - acc: 0.8943 - val_loss: 0.2074 - val_acc: 0.9210
Learning rate:  0.001
Epoch 129/300
924/924 [==============================] - 75s 82ms/step - loss: 0.2611 - acc: 0.8965 - val_loss: 0.2041 - val_acc: 0.9210
Learning rate:  0.001
Epoch 130/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2636 - acc: 0.8965 - val_loss: 0.2290 - val_acc: 0.9145
Learning rate:  0.001
Epoch 131/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2626 - acc: 0.8969 - val_loss: 0.1965 - val_acc: 0.9205
Learning rate:  0.001
Epoch 132/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2658 - acc: 0.8988 - val_loss: 0.2268 - val_acc: 0.9145
Learning rate:  0.001
Epoch 133/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2619 - acc: 0.8958 - val_loss: 0.2209 - val_acc: 0.9115
Learning rate:  0.001
Epoch 134/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2601 - acc: 0.8996 - val_loss: 0.2070 - val_acc: 0.9167
Learning rate:  0.001
Epoch 135/300
924/924 [==============================] - 77s 83ms/step - loss: 0.2539 - acc: 0.8983 - val_loss: 0.1992 - val_acc: 0.9234
Learning rate:  0.001
Epoch 136/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2581 - acc: 0.8986 - val_loss: 0.1903 - val_acc: 0.9264
Learning rate:  0.001
Epoch 137/300
924/924 [==============================] - 75s 82ms/step - loss: 0.2635 - acc: 0.8954 - val_loss: 0.2169 - val_acc: 0.9223
Learning rate:  0.001
Epoch 138/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2555 - acc: 0.9003 - val_loss: 0.2191 - val_acc: 0.9202
Learning rate:  0.001
Epoch 139/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2599 - acc: 0.8979 - val_loss: 0.1993 - val_acc: 0.9261
Learning rate:  0.001
Epoch 140/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2606 - acc: 0.8981 - val_loss: 0.2424 - val_acc: 0.9159
Learning rate:  0.001
Epoch 141/300
924/924 [==============================] - 78s 84ms/step - loss: 0.2591 - acc: 0.8960 - val_loss: 0.2080 - val_acc: 0.9232
Learning rate:  0.001
Epoch 142/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2579 - acc: 0.8986 - val_loss: 0.2015 - val_acc: 0.9251
Learning rate:  0.001
Epoch 143/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2581 - acc: 0.9011 - val_loss: 0.2357 - val_acc: 0.9115
Learning rate:  0.001
Epoch 144/300
924/924 [==============================] - 77s 84ms/step - loss: 0.2638 - acc: 0.8973 - val_loss: 0.1943 - val_acc: 0.9194
Learning rate:  0.001
Epoch 145/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2565 - acc: 0.8985 - val_loss: 0.2083 - val_acc: 0.9175
Learning rate:  0.001
Epoch 146/300
924/924 [==============================] - 80s 86ms/step - loss: 0.2528 - acc: 0.9017 - val_loss: 0.2147 - val_acc: 0.9213
Learning rate:  0.001
Epoch 147/300
924/924 [==============================] - 79s 85ms/step - loss: 0.2596 - acc: 0.9008 - val_loss: 0.2148 - val_acc: 0.9172
Learning rate:  0.001
Epoch 148/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2585 - acc: 0.8996 - val_loss: 0.2081 - val_acc: 0.9223
Learning rate:  0.001
Epoch 149/300
924/924 [==============================] - 81s 88ms/step - loss: 0.2547 - acc: 0.8993 - val_loss: 0.2138 - val_acc: 0.9272
Learning rate:  0.001
Epoch 150/300
924/924 [==============================] - 77s 83ms/step - loss: 0.2565 - acc: 0.8998 - val_loss: 0.2562 - val_acc: 0.8945
Learning rate:  0.001
Epoch 151/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2551 - acc: 0.9020 - val_loss: 0.2021 - val_acc: 0.9253
Learning rate:  0.0001
Epoch 152/300
924/924 [==============================] - 77s 83ms/step - loss: 0.2405 - acc: 0.9069 - val_loss: 0.1873 - val_acc: 0.9272
Learning rate:  0.0001
Epoch 153/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2357 - acc: 0.9063 - val_loss: 0.1885 - val_acc: 0.9259
Learning rate:  0.0001
Epoch 154/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2365 - acc: 0.9062 - val_loss: 0.1860 - val_acc: 0.9261
Learning rate:  0.0001
Epoch 155/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2326 - acc: 0.9102 - val_loss: 0.1823 - val_acc: 0.9307
Learning rate:  0.0001
Epoch 156/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2373 - acc: 0.9101 - val_loss: 0.1856 - val_acc: 0.9272
Learning rate:  0.0001
Epoch 157/300
924/924 [==============================] - 77s 83ms/step - loss: 0.2354 - acc: 0.9073 - val_loss: 0.1878 - val_acc: 0.9286
Learning rate:  0.0001
Epoch 158/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2376 - acc: 0.9088 - val_loss: 0.1904 - val_acc: 0.9259
Learning rate:  0.0001
Epoch 159/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2411 - acc: 0.9071 - val_loss: 0.1820 - val_acc: 0.9272
Learning rate:  0.0001
Epoch 160/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2342 - acc: 0.9101 - val_loss: 0.1894 - val_acc: 0.9248
Learning rate:  0.0001
Epoch 161/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2361 - acc: 0.9092 - val_loss: 0.1884 - val_acc: 0.9291
Learning rate:  0.0001
Epoch 162/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2326 - acc: 0.9097 - val_loss: 0.1843 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 163/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2283 - acc: 0.9126 - val_loss: 0.1851 - val_acc: 0.9302
Learning rate:  0.0001
Epoch 164/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2281 - acc: 0.9143 - val_loss: 0.1855 - val_acc: 0.9286
Learning rate:  0.0001
Epoch 165/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2342 - acc: 0.9094 - val_loss: 0.1836 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 166/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2262 - acc: 0.9109 - val_loss: 0.1825 - val_acc: 0.9272
Learning rate:  0.0001
Epoch 167/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2242 - acc: 0.9140 - val_loss: 0.1827 - val_acc: 0.9259
Learning rate:  0.0001
Epoch 168/300
924/924 [==============================] - 78s 84ms/step - loss: 0.2325 - acc: 0.9123 - val_loss: 0.1814 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 169/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2304 - acc: 0.9089 - val_loss: 0.1871 - val_acc: 0.9278
Learning rate:  0.0001
Epoch 170/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2329 - acc: 0.9077 - val_loss: 0.1865 - val_acc: 0.9294
Learning rate:  0.0001
Epoch 171/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2302 - acc: 0.9090 - val_loss: 0.1873 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 172/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2296 - acc: 0.9123 - val_loss: 0.1859 - val_acc: 0.9283
Learning rate:  0.0001
Epoch 173/300
924/924 [==============================] - 77s 83ms/step - loss: 0.2247 - acc: 0.9107 - val_loss: 0.1805 - val_acc: 0.9302
Learning rate:  0.0001
Epoch 174/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2320 - acc: 0.9104 - val_loss: 0.1799 - val_acc: 0.9294
Learning rate:  0.0001
Epoch 175/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2361 - acc: 0.9123 - val_loss: 0.1842 - val_acc: 0.9283
Learning rate:  0.0001
Epoch 176/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2325 - acc: 0.9092 - val_loss: 0.1820 - val_acc: 0.9280
Learning rate:  0.0001
Epoch 177/300
924/924 [==============================] - 78s 84ms/step - loss: 0.2329 - acc: 0.9117 - val_loss: 0.1840 - val_acc: 0.9280
Learning rate:  0.0001
Epoch 178/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2260 - acc: 0.9126 - val_loss: 0.1789 - val_acc: 0.9307
Learning rate:  0.0001
Epoch 179/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2261 - acc: 0.9127 - val_loss: 0.1805 - val_acc: 0.9291
Learning rate:  0.0001
Epoch 180/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2321 - acc: 0.9123 - val_loss: 0.1822 - val_acc: 0.9278
Learning rate:  0.0001
Epoch 181/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2252 - acc: 0.9131 - val_loss: 0.1834 - val_acc: 0.9294
Learning rate:  0.0001
Epoch 182/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2317 - acc: 0.9106 - val_loss: 0.1811 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 183/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2249 - acc: 0.9112 - val_loss: 0.1826 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 184/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2298 - acc: 0.9116 - val_loss: 0.1813 - val_acc: 0.9297
Learning rate:  0.0001
Epoch 185/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2265 - acc: 0.9159 - val_loss: 0.1777 - val_acc: 0.9294
Learning rate:  0.0001
Epoch 186/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2262 - acc: 0.9135 - val_loss: 0.1793 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 187/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2247 - acc: 0.9134 - val_loss: 0.1798 - val_acc: 0.9291
Learning rate:  0.0001
Epoch 188/300
924/924 [==============================] - 74s 81ms/step - loss: 0.2265 - acc: 0.9127 - val_loss: 0.1799 - val_acc: 0.9302
Learning rate:  0.0001
Epoch 189/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2248 - acc: 0.9117 - val_loss: 0.1799 - val_acc: 0.9297
Learning rate:  0.0001
Epoch 190/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2361 - acc: 0.9110 - val_loss: 0.1851 - val_acc: 0.9294
Learning rate:  0.0001
Epoch 191/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2239 - acc: 0.9127 - val_loss: 0.1805 - val_acc: 0.9310
Learning rate:  0.0001
Epoch 192/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2327 - acc: 0.9106 - val_loss: 0.1825 - val_acc: 0.9315
Learning rate:  0.0001
Epoch 193/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2302 - acc: 0.9123 - val_loss: 0.1811 - val_acc: 0.9294
Learning rate:  0.0001
Epoch 194/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2208 - acc: 0.9168 - val_loss: 0.1826 - val_acc: 0.9294
Learning rate:  0.0001
Epoch 195/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2259 - acc: 0.9117 - val_loss: 0.1859 - val_acc: 0.9299
Learning rate:  0.0001
Epoch 196/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2238 - acc: 0.9117 - val_loss: 0.1829 - val_acc: 0.9291
Learning rate:  0.0001
Epoch 197/300
924/924 [==============================] - 75s 82ms/step - loss: 0.2334 - acc: 0.9113 - val_loss: 0.1824 - val_acc: 0.9283
Learning rate:  0.0001
Epoch 198/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2263 - acc: 0.9127 - val_loss: 0.1800 - val_acc: 0.9315
Learning rate:  0.0001
Epoch 199/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2302 - acc: 0.9135 - val_loss: 0.1756 - val_acc: 0.9321
Learning rate:  0.0001
Epoch 200/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2267 - acc: 0.9135 - val_loss: 0.1840 - val_acc: 0.9288
Learning rate:  0.0001
Epoch 201/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2241 - acc: 0.9143 - val_loss: 0.1830 - val_acc: 0.9291
Learning rate:  1e-05
Epoch 202/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2207 - acc: 0.9148 - val_loss: 0.1788 - val_acc: 0.9310
Learning rate:  1e-05
Epoch 203/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2284 - acc: 0.9128 - val_loss: 0.1792 - val_acc: 0.9310
Learning rate:  1e-05
Epoch 204/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2216 - acc: 0.9167 - val_loss: 0.1793 - val_acc: 0.9307
Learning rate:  1e-05
Epoch 205/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2200 - acc: 0.9163 - val_loss: 0.1802 - val_acc: 0.9297
Learning rate:  1e-05
Epoch 206/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2218 - acc: 0.9146 - val_loss: 0.1799 - val_acc: 0.9310
Learning rate:  1e-05
Epoch 207/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2254 - acc: 0.9126 - val_loss: 0.1799 - val_acc: 0.9307
Learning rate:  1e-05
Epoch 208/300
924/924 [==============================] - 73s 78ms/step - loss: 0.2220 - acc: 0.9150 - val_loss: 0.1793 - val_acc: 0.9313
Learning rate:  1e-05
Epoch 209/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2226 - acc: 0.9131 - val_loss: 0.1795 - val_acc: 0.9307
Learning rate:  1e-05
Epoch 210/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2274 - acc: 0.9130 - val_loss: 0.1796 - val_acc: 0.9302
Learning rate:  1e-05
Epoch 211/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2256 - acc: 0.9124 - val_loss: 0.1804 - val_acc: 0.9302
Learning rate:  1e-05
Epoch 212/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2287 - acc: 0.9100 - val_loss: 0.1817 - val_acc: 0.9288
Learning rate:  1e-05
Epoch 213/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2256 - acc: 0.9117 - val_loss: 0.1796 - val_acc: 0.9313
Learning rate:  1e-05
Epoch 214/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2250 - acc: 0.9141 - val_loss: 0.1793 - val_acc: 0.9310
Learning rate:  1e-05
Epoch 215/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2189 - acc: 0.9143 - val_loss: 0.1798 - val_acc: 0.9302
Learning rate:  1e-05
Epoch 216/300
924/924 [==============================] - 74s 81ms/step - loss: 0.2271 - acc: 0.9131 - val_loss: 0.1798 - val_acc: 0.9299
Learning rate:  1e-05
Epoch 217/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2289 - acc: 0.9139 - val_loss: 0.1795 - val_acc: 0.9302
Learning rate:  1e-05
Epoch 218/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2226 - acc: 0.9114 - val_loss: 0.1797 - val_acc: 0.9299
Learning rate:  1e-05
Epoch 219/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2178 - acc: 0.9157 - val_loss: 0.1789 - val_acc: 0.9294
Learning rate:  1e-05
Epoch 220/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2253 - acc: 0.9131 - val_loss: 0.1789 - val_acc: 0.9307
Learning rate:  1e-05
Epoch 221/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2164 - acc: 0.9148 - val_loss: 0.1820 - val_acc: 0.9305
Learning rate:  1e-05
Epoch 222/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2185 - acc: 0.9184 - val_loss: 0.1792 - val_acc: 0.9305
Learning rate:  1e-05
Epoch 223/300
924/924 [==============================] - 76s 83ms/step - loss: 0.2208 - acc: 0.9121 - val_loss: 0.1783 - val_acc: 0.9299
Learning rate:  1e-05
Epoch 224/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2216 - acc: 0.9167 - val_loss: 0.1791 - val_acc: 0.9302
Learning rate:  1e-05
Epoch 225/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2292 - acc: 0.9140 - val_loss: 0.1786 - val_acc: 0.9305
Learning rate:  1e-05
Epoch 226/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2255 - acc: 0.9145 - val_loss: 0.1794 - val_acc: 0.9299
Learning rate:  1e-05
Epoch 227/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2279 - acc: 0.9125 - val_loss: 0.1791 - val_acc: 0.9305
Learning rate:  1e-05
Epoch 228/300
924/924 [==============================] - 76s 83ms/step - loss: 0.2267 - acc: 0.9142 - val_loss: 0.1806 - val_acc: 0.9305
Learning rate:  1e-05
Epoch 229/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2183 - acc: 0.9147 - val_loss: 0.1800 - val_acc: 0.9315
Learning rate:  1e-05
Epoch 230/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2257 - acc: 0.9143 - val_loss: 0.1797 - val_acc: 0.9299
Learning rate:  1e-05
Epoch 231/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2209 - acc: 0.9156 - val_loss: 0.1788 - val_acc: 0.9305
Learning rate:  1e-05
Epoch 232/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2266 - acc: 0.9136 - val_loss: 0.1797 - val_acc: 0.9307
Learning rate:  1e-05
Epoch 233/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2265 - acc: 0.9150 - val_loss: 0.1805 - val_acc: 0.9299
Learning rate:  1e-05
Epoch 234/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2272 - acc: 0.9110 - val_loss: 0.1800 - val_acc: 0.9310
Learning rate:  1e-05
Epoch 235/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2237 - acc: 0.9104 - val_loss: 0.1787 - val_acc: 0.9313
Learning rate:  1e-05
Epoch 236/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2110 - acc: 0.9188 - val_loss: 0.1776 - val_acc: 0.9313
Learning rate:  1e-05
Epoch 237/300
924/924 [==============================] - 74s 81ms/step - loss: 0.2268 - acc: 0.9140 - val_loss: 0.1797 - val_acc: 0.9302
Learning rate:  1e-05
Epoch 238/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2212 - acc: 0.9151 - val_loss: 0.1790 - val_acc: 0.9288
Learning rate:  1e-05
Epoch 239/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2318 - acc: 0.9088 - val_loss: 0.1802 - val_acc: 0.9305
Learning rate:  1e-05
Epoch 240/300
924/924 [==============================] - 74s 81ms/step - loss: 0.2204 - acc: 0.9140 - val_loss: 0.1788 - val_acc: 0.9315
Learning rate:  1e-05
Epoch 241/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2315 - acc: 0.9117 - val_loss: 0.1790 - val_acc: 0.9305
Learning rate:  1e-06
Epoch 242/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2277 - acc: 0.9123 - val_loss: 0.1795 - val_acc: 0.9305
Learning rate:  1e-06
Epoch 243/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2220 - acc: 0.9131 - val_loss: 0.1809 - val_acc: 0.9305
Learning rate:  1e-06
Epoch 244/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2294 - acc: 0.9151 - val_loss: 0.1806 - val_acc: 0.9297
Learning rate:  1e-06
Epoch 245/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2240 - acc: 0.9143 - val_loss: 0.1802 - val_acc: 0.9305
Learning rate:  1e-06
Epoch 246/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2198 - acc: 0.9152 - val_loss: 0.1799 - val_acc: 0.9302
Learning rate:  1e-06
Epoch 247/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2185 - acc: 0.9157 - val_loss: 0.1790 - val_acc: 0.9310
Learning rate:  1e-06
Epoch 248/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2233 - acc: 0.9131 - val_loss: 0.1801 - val_acc: 0.9297
Learning rate:  1e-06
Epoch 249/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2259 - acc: 0.9140 - val_loss: 0.1788 - val_acc: 0.9315
Learning rate:  1e-06
Epoch 250/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2153 - acc: 0.9181 - val_loss: 0.1793 - val_acc: 0.9302
Learning rate:  1e-06
Epoch 251/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2235 - acc: 0.9106 - val_loss: 0.1839 - val_acc: 0.9299
Learning rate:  1e-06
Epoch 252/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2222 - acc: 0.9163 - val_loss: 0.1785 - val_acc: 0.9297
Learning rate:  1e-06
Epoch 253/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2271 - acc: 0.9127 - val_loss: 0.1790 - val_acc: 0.9305
Learning rate:  1e-06
Epoch 254/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2193 - acc: 0.9146 - val_loss: 0.1783 - val_acc: 0.9305
Learning rate:  1e-06
Epoch 255/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2229 - acc: 0.9169 - val_loss: 0.1785 - val_acc: 0.9299
Learning rate:  1e-06
Epoch 256/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2190 - acc: 0.9148 - val_loss: 0.1802 - val_acc: 0.9299
Learning rate:  1e-06
Epoch 257/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2258 - acc: 0.9108 - val_loss: 0.1810 - val_acc: 0.9299
Learning rate:  1e-06
Epoch 258/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2311 - acc: 0.9115 - val_loss: 0.1792 - val_acc: 0.9307
Learning rate:  1e-06
Epoch 259/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2230 - acc: 0.9135 - val_loss: 0.1800 - val_acc: 0.9297
Learning rate:  1e-06
Epoch 260/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2245 - acc: 0.9138 - val_loss: 0.1784 - val_acc: 0.9313
Learning rate:  1e-06
Epoch 261/300
924/924 [==============================] - 71s 76ms/step - loss: 0.2168 - acc: 0.9142 - val_loss: 0.1798 - val_acc: 0.9294
Learning rate:  1e-06
Epoch 262/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2207 - acc: 0.9136 - val_loss: 0.1792 - val_acc: 0.9307
Learning rate:  1e-06
Epoch 263/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2232 - acc: 0.9134 - val_loss: 0.1785 - val_acc: 0.9299
Learning rate:  1e-06
Epoch 264/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2215 - acc: 0.9118 - val_loss: 0.1840 - val_acc: 0.9307
Learning rate:  1e-06
Epoch 265/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2262 - acc: 0.9134 - val_loss: 0.1793 - val_acc: 0.9305
Learning rate:  1e-06
Epoch 266/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2237 - acc: 0.9144 - val_loss: 0.1800 - val_acc: 0.9307
Learning rate:  1e-06
Epoch 267/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2221 - acc: 0.9142 - val_loss: 0.1795 - val_acc: 0.9297
Learning rate:  1e-06
Epoch 268/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2215 - acc: 0.9129 - val_loss: 0.1807 - val_acc: 0.9305
Learning rate:  1e-06
Epoch 269/300
924/924 [==============================] - 77s 84ms/step - loss: 0.2207 - acc: 0.9155 - val_loss: 0.1795 - val_acc: 0.9307
Learning rate:  1e-06
Epoch 270/300
924/924 [==============================] - 77s 83ms/step - loss: 0.2227 - acc: 0.9146 - val_loss: 0.1799 - val_acc: 0.9305
Learning rate:  1e-06
Epoch 271/300
924/924 [==============================] - 75s 82ms/step - loss: 0.2231 - acc: 0.9131 - val_loss: 0.1799 - val_acc: 0.9288
Learning rate:  5e-07
Epoch 272/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2251 - acc: 0.9111 - val_loss: 0.1788 - val_acc: 0.9302
Learning rate:  5e-07
Epoch 273/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2265 - acc: 0.9146 - val_loss: 0.1800 - val_acc: 0.9302
Learning rate:  5e-07
Epoch 274/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2199 - acc: 0.9172 - val_loss: 0.1795 - val_acc: 0.9302
Learning rate:  5e-07
Epoch 275/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2307 - acc: 0.9120 - val_loss: 0.1800 - val_acc: 0.9307
Learning rate:  5e-07
Epoch 276/300
924/924 [==============================] - 75s 82ms/step - loss: 0.2268 - acc: 0.9152 - val_loss: 0.1797 - val_acc: 0.9299
Learning rate:  5e-07
Epoch 277/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2221 - acc: 0.9145 - val_loss: 0.1791 - val_acc: 0.9291
Learning rate:  5e-07
Epoch 278/300
924/924 [==============================] - 76s 83ms/step - loss: 0.2249 - acc: 0.9134 - val_loss: 0.1804 - val_acc: 0.9297
Learning rate:  5e-07
Epoch 279/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2194 - acc: 0.9136 - val_loss: 0.1805 - val_acc: 0.9299
Learning rate:  5e-07
Epoch 280/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2202 - acc: 0.9167 - val_loss: 0.1802 - val_acc: 0.9310
Learning rate:  5e-07
Epoch 281/300
924/924 [==============================] - 80s 86ms/step - loss: 0.2200 - acc: 0.9129 - val_loss: 0.1799 - val_acc: 0.9305
Learning rate:  5e-07
Epoch 282/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2226 - acc: 0.9138 - val_loss: 0.1840 - val_acc: 0.9310
Learning rate:  5e-07
Epoch 283/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2254 - acc: 0.9111 - val_loss: 0.1855 - val_acc: 0.9286
Learning rate:  5e-07
Epoch 284/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2259 - acc: 0.9148 - val_loss: 0.1797 - val_acc: 0.9305
Learning rate:  5e-07
Epoch 285/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2257 - acc: 0.9134 - val_loss: 0.1834 - val_acc: 0.9299
Learning rate:  5e-07
Epoch 286/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2244 - acc: 0.9138 - val_loss: 0.1786 - val_acc: 0.9305
Learning rate:  5e-07
Epoch 287/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2219 - acc: 0.9123 - val_loss: 0.1796 - val_acc: 0.9294
Learning rate:  5e-07
Epoch 288/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2291 - acc: 0.9138 - val_loss: 0.1793 - val_acc: 0.9294
Learning rate:  5e-07
Epoch 289/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2221 - acc: 0.9150 - val_loss: 0.1790 - val_acc: 0.9313
Learning rate:  5e-07
Epoch 290/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2194 - acc: 0.9156 - val_loss: 0.1799 - val_acc: 0.9288
Learning rate:  5e-07
Epoch 291/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2228 - acc: 0.9143 - val_loss: 0.1802 - val_acc: 0.9305
Learning rate:  5e-07
Epoch 292/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2191 - acc: 0.9144 - val_loss: 0.1794 - val_acc: 0.9299
Learning rate:  5e-07
Epoch 293/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2237 - acc: 0.9161 - val_loss: 0.1798 - val_acc: 0.9307
Learning rate:  5e-07
Epoch 294/300
924/924 [==============================] - 71s 77ms/step - loss: 0.2237 - acc: 0.9138 - val_loss: 0.1791 - val_acc: 0.9302
Learning rate:  5e-07
Epoch 295/300
924/924 [==============================] - 73s 79ms/step - loss: 0.2230 - acc: 0.9163 - val_loss: 0.1793 - val_acc: 0.9299
Learning rate:  5e-07
Epoch 296/300
924/924 [==============================] - 77s 83ms/step - loss: 0.2181 - acc: 0.9163 - val_loss: 0.1793 - val_acc: 0.9299
Learning rate:  5e-07
Epoch 297/300
924/924 [==============================] - 76s 82ms/step - loss: 0.2285 - acc: 0.9134 - val_loss: 0.1796 - val_acc: 0.9291
Learning rate:  5e-07
Epoch 298/300
924/924 [==============================] - 74s 80ms/step - loss: 0.2214 - acc: 0.9142 - val_loss: 0.1803 - val_acc: 0.9291
Learning rate:  5e-07
Epoch 299/300
924/924 [==============================] - 72s 78ms/step - loss: 0.2234 - acc: 0.9126 - val_loss: 0.1801 - val_acc: 0.9294
Learning rate:  5e-07
Epoch 300/300
924/924 [==============================] - 75s 81ms/step - loss: 0.2247 - acc: 0.9134 - val_loss: 0.1847 - val_acc: 0.9297
Out[33]:
<tensorflow.python.keras.callbacks.History at 0x18d64fcb550>
In [34]:
##### Now the training is complete, we get
# another object to load the weights
# compile it, so that we can do 
# final evaluation on it
modelGo.load_weights(filepath)
modelGo.compile(loss='categorical_crossentropy', 
                optimizer='adam', 
                metrics=['accuracy'])
In [35]:
# Make classification on the test dataset
predicts    = modelGo.predict(tsDat)

# Prepare the classification output
# for the classification report
predout     = np.argmax(predicts,axis=1)
testout     = np.argmax(tsLbl,axis=1)
labelname   = ['non-flower', 'flower']
                                            # the labels for the classfication report


testScores  = metrics.accuracy_score(testout,predout)
confusion   = metrics.confusion_matrix(testout,predout)


print("Best accuracy (on testing dataset): %.2f%%" % (testScores*100))
print(metrics.classification_report(testout,predout,target_names=labelname,digits=4))
print(confusion)
Best accuracy (on testing dataset): 93.21%
              precision    recall  f1-score   support

  non-flower     0.8960    0.9350    0.9151      1446
      flower     0.9570    0.9302    0.9434      2250

    accuracy                         0.9321      3696
   macro avg     0.9265    0.9326    0.9292      3696
weighted avg     0.9331    0.9321    0.9323      3696

[[1352   94]
 [ 157 2093]]
In [36]:
import pandas as pd

records     = pd.read_csv(modelname +'.csv')
plt.figure()
plt.subplot(211)
plt.plot(records['val_loss'])
plt.plot(records['loss'])
plt.yticks([0, 0.20, 0.30, 0.4, 0.5])
plt.title('Loss value',fontsize=12)

ax          = plt.gca()
ax.set_xticklabels([])



plt.subplot(212)
plt.plot(records['val_acc'])
plt.plot(records['acc'])
plt.yticks([0.7, 0.8, 0.9, 1.0])
plt.title('Accuracy',fontsize=12)
plt.show()
In [27]:
wrong_ans_index = []

for i in range(len(predout)):
    if predout[i] != testout[i]:
        wrong_ans_index.append(i)
In [28]:
wrong_ans_index = list(set(wrong_ans_index))
In [29]:
# Randomly show X examples of that was wrong

dataset = tsDatOrg #flowers #fungus #rocks

for index in wrong_ans_index:
    #index = wrong_ans_index[random.randint(0, len(wrong_ans_index)-1)]
    print("Showing %s index image" %(index))
    print("Predicted as %s but is actually %s" %(predout[index], testout[index]))
    imgplot = plt.imshow(data[index])
    plt.show()
Showing 3585 index image
Predicted as 1 but is actually 0
Showing 3155 index image
Predicted as 1 but is actually 0
Showing 2051 index image
Predicted as 0 but is actually 1
Showing 2053 index image
Predicted as 0 but is actually 1
Showing 510 index image
Predicted as 0 but is actually 1
Showing 3079 index image
Predicted as 1 but is actually 0
Showing 3080 index image
Predicted as 1 but is actually 0
Showing 1547 index image
Predicted as 0 but is actually 1
Showing 3085 index image
Predicted as 1 but is actually 0
Showing 3666 index image
Predicted as 1 but is actually 0
Showing 17 index image
Predicted as 0 but is actually 1
Showing 1554 index image
Predicted as 0 but is actually 1
Showing 3603 index image
Predicted as 1 but is actually 0
Showing 2583 index image
Predicted as 1 but is actually 0
Showing 536 index image
Predicted as 0 but is actually 1
Showing 1049 index image
Predicted as 0 but is actually 1
Showing 2074 index image
Predicted as 0 but is actually 1
Showing 2584 index image
Predicted as 1 but is actually 0
Showing 2585 index image
Predicted as 1 but is actually 0
Showing 2589 index image
Predicted as 1 but is actually 0
Showing 3610 index image
Predicted as 1 but is actually 0
Showing 2591 index image
Predicted as 1 but is actually 0
Showing 32 index image
Predicted as 0 but is actually 1
Showing 2080 index image
Predicted as 0 but is actually 1
Showing 1572 index image
Predicted as 0 but is actually 1
Showing 37 index image
Predicted as 0 but is actually 1
Showing 3112 index image
Predicted as 1 but is actually 0
Showing 1065 index image
Predicted as 0 but is actually 1
Showing 3625 index image
Predicted as 1 but is actually 0
Showing 3116 index image
Predicted as 1 but is actually 0
Showing 2094 index image
Predicted as 0 but is actually 1
Showing 3119 index image
Predicted as 1 but is actually 0
Showing 2610 index image
Predicted as 1 but is actually 0
Showing 564 index image
Predicted as 0 but is actually 1
Showing 1079 index image
Predicted as 0 but is actually 1
Showing 570 index image
Predicted as 0 but is actually 1
Showing 571 index image
Predicted as 0 but is actually 1
Showing 1594 index image
Predicted as 0 but is actually 1
Showing 2111 index image
Predicted as 0 but is actually 1
Showing 2623 index image
Predicted as 1 but is actually 0
Showing 3136 index image
Predicted as 1 but is actually 0
Showing 3647 index image
Predicted as 1 but is actually 0
Showing 3140 index image
Predicted as 1 but is actually 0
Showing 69 index image
Predicted as 0 but is actually 1
Showing 70 index image
Predicted as 0 but is actually 1
Showing 3141 index image
Predicted as 1 but is actually 0
Showing 2632 index image
Predicted as 1 but is actually 0
Showing 3656 index image
Predicted as 1 but is actually 0
Showing 3657 index image
Predicted as 1 but is actually 0
Showing 75 index image
Predicted as 0 but is actually 1
Showing 2127 index image
Predicted as 0 but is actually 1
Showing 1616 index image
Predicted as 0 but is actually 1
Showing 1618 index image
Predicted as 0 but is actually 1
Showing 83 index image
Predicted as 0 but is actually 1
Showing 1619 index image
Predicted as 0 but is actually 1
Showing 597 index image
Predicted as 0 but is actually 1
Showing 1109 index image
Predicted as 0 but is actually 1
Showing 2132 index image
Predicted as 0 but is actually 1
Showing 88 index image
Predicted as 0 but is actually 1
Showing 89 index image
Predicted as 0 but is actually 1
Showing 601 index image
Predicted as 0 but is actually 1
Showing 1114 index image
Predicted as 0 but is actually 1
Showing 2134 index image
Predicted as 0 but is actually 1
Showing 3161 index image
Predicted as 1 but is actually 0
Showing 2654 index image
Predicted as 1 but is actually 0
Showing 2144 index image
Predicted as 0 but is actually 1
Showing 1121 index image
Predicted as 0 but is actually 1
Showing 610 index image
Predicted as 0 but is actually 1
Showing 2656 index image
Predicted as 1 but is actually 0
Showing 3680 index image
Predicted as 1 but is actually 0
Showing 2663 index image
Predicted as 1 but is actually 0
Showing 1642 index image
Predicted as 0 but is actually 1
Showing 3178 index image
Predicted as 1 but is actually 0
Showing 2156 index image
Predicted as 0 but is actually 1
Showing 621 index image
Predicted as 0 but is actually 1
Showing 3182 index image
Predicted as 1 but is actually 0
Showing 3183 index image
Predicted as 1 but is actually 0
Showing 2672 index image
Predicted as 1 but is actually 0
Showing 628 index image
Predicted as 0 but is actually 1
Showing 631 index image
Predicted as 0 but is actually 1
Showing 3668 index image
Predicted as 1 but is actually 0
Showing 1145 index image
Predicted as 0 but is actually 1
Showing 636 index image
Predicted as 0 but is actually 1
Showing 3197 index image
Predicted as 1 but is actually 0
Showing 2176 index image
Predicted as 0 but is actually 1
Showing 2177 index image
Predicted as 0 but is actually 1
Showing 130 index image
Predicted as 0 but is actually 1
Showing 642 index image
Predicted as 0 but is actually 1
Showing 2691 index image
Predicted as 1 but is actually 0
Showing 645 index image
Predicted as 0 but is actually 1
Showing 1669 index image
Predicted as 0 but is actually 1
Showing 647 index image
Predicted as 0 but is actually 1
Showing 2181 index image
Predicted as 0 but is actually 1
Showing 137 index image
Predicted as 0 but is actually 1
Showing 3203 index image
Predicted as 1 but is actually 0
Showing 3207 index image
Predicted as 1 but is actually 0
Showing 1165 index image
Predicted as 0 but is actually 1
Showing 2189 index image
Predicted as 0 but is actually 1
Showing 2191 index image
Predicted as 0 but is actually 1
Showing 656 index image
Predicted as 0 but is actually 1
Showing 3215 index image
Predicted as 1 but is actually 0
Showing 146 index image
Predicted as 0 but is actually 1
Showing 147 index image
Predicted as 0 but is actually 1
Showing 2195 index image
Predicted as 0 but is actually 1
Showing 662 index image
Predicted as 0 but is actually 1
Showing 1175 index image
Predicted as 0 but is actually 1
Showing 1176 index image
Predicted as 0 but is actually 1
Showing 2199 index image
Predicted as 0 but is actually 1
Showing 1690 index image
Predicted as 0 but is actually 1
Showing 2713 index image
Predicted as 1 but is actually 0
Showing 156 index image
Predicted as 0 but is actually 1
Showing 2717 index image
Predicted as 1 but is actually 0
Showing 3224 index image
Predicted as 1 but is actually 0
Showing 3226 index image
Predicted as 1 but is actually 0
Showing 2725 index image
Predicted as 1 but is actually 0
Showing 1704 index image
Predicted as 0 but is actually 1
Showing 2216 index image
Predicted as 0 but is actually 1
Showing 1706 index image
Predicted as 0 but is actually 1
Showing 171 index image
Predicted as 0 but is actually 1
Showing 3244 index image
Predicted as 1 but is actually 0
Showing 2221 index image
Predicted as 0 but is actually 1
Showing 174 index image
Predicted as 0 but is actually 1
Showing 2737 index image
Predicted as 1 but is actually 0
Showing 1715 index image
Predicted as 0 but is actually 1
Showing 2227 index image
Predicted as 0 but is actually 1
Showing 3254 index image
Predicted as 1 but is actually 0
Showing 3259 index image
Predicted as 1 but is actually 0
Showing 2749 index image
Predicted as 1 but is actually 0
Showing 3262 index image
Predicted as 1 but is actually 0
Showing 1731 index image
Predicted as 0 but is actually 1
Showing 1220 index image
Predicted as 0 but is actually 1
Showing 709 index image
Predicted as 0 but is actually 1
Showing 2246 index image
Predicted as 0 but is actually 1
Showing 3268 index image
Predicted as 1 but is actually 0
Showing 200 index image
Predicted as 0 but is actually 1
Showing 2248 index image
Predicted as 0 but is actually 1
Showing 2249 index image
Predicted as 0 but is actually 1
Showing 2760 index image
Predicted as 1 but is actually 0
Showing 2763 index image
Predicted as 1 but is actually 0
Showing 3271 index image
Predicted as 1 but is actually 0
Showing 718 index image
Predicted as 0 but is actually 1
Showing 2256 index image
Predicted as 1 but is actually 0
Showing 1235 index image
Predicted as 0 but is actually 1
Showing 2772 index image
Predicted as 1 but is actually 0
Showing 726 index image
Predicted as 0 but is actually 1
Showing 1750 index image
Predicted as 0 but is actually 1
Showing 1756 index image
Predicted as 0 but is actually 1
Showing 2269 index image
Predicted as 1 but is actually 0
Showing 2781 index image
Predicted as 1 but is actually 0
Showing 224 index image
Predicted as 0 but is actually 1
Showing 736 index image
Predicted as 0 but is actually 1
Showing 2273 index image
Predicted as 1 but is actually 0
Showing 2784 index image
Predicted as 1 but is actually 0
Showing 3297 index image
Predicted as 1 but is actually 0
Showing 1253 index image
Predicted as 0 but is actually 1
Showing 2793 index image
Predicted as 1 but is actually 0
Showing 235 index image
Predicted as 0 but is actually 1
Showing 1264 index image
Predicted as 0 but is actually 1
Showing 753 index image
Predicted as 0 but is actually 1
Showing 2803 index image
Predicted as 1 but is actually 0
Showing 3316 index image
Predicted as 1 but is actually 0
Showing 253 index image
Predicted as 0 but is actually 1
Showing 2302 index image
Predicted as 1 but is actually 0
Showing 2815 index image
Predicted as 1 but is actually 0
Showing 2816 index image
Predicted as 1 but is actually 0
Showing 2305 index image
Predicted as 1 but is actually 0
Showing 2821 index image
Predicted as 1 but is actually 0
Showing 776 index image
Predicted as 0 but is actually 1
Showing 267 index image
Predicted as 0 but is actually 1
Showing 1804 index image
Predicted as 0 but is actually 1
Showing 781 index image
Predicted as 0 but is actually 1
Showing 270 index image
Predicted as 0 but is actually 1
Showing 271 index image
Predicted as 0 but is actually 1
Showing 1807 index image
Predicted as 0 but is actually 1
Showing 1808 index image
Predicted as 0 but is actually 1
Showing 1298 index image
Predicted as 0 but is actually 1
Showing 3342 index image
Predicted as 1 but is actually 0
Showing 3347 index image
Predicted as 1 but is actually 0
Showing 3514 index image
Predicted as 1 but is actually 0
Showing 2839 index image
Predicted as 1 but is actually 0
Showing 281 index image
Predicted as 0 but is actually 1
Showing 795 index image
Predicted as 0 but is actually 1
Showing 2331 index image
Predicted as 1 but is actually 0
Showing 3356 index image
Predicted as 1 but is actually 0
Showing 1823 index image
Predicted as 0 but is actually 1
Showing 1824 index image
Predicted as 0 but is actually 1
Showing 3361 index image
Predicted as 1 but is actually 0
Showing 1828 index image
Predicted as 0 but is actually 1
Showing 2340 index image
Predicted as 1 but is actually 0
Showing 2855 index image
Predicted as 1 but is actually 0
Showing 2858 index image
Predicted as 1 but is actually 0
Showing 3371 index image
Predicted as 1 but is actually 0
Showing 1836 index image
Predicted as 0 but is actually 1
Showing 2349 index image
Predicted as 1 but is actually 0
Showing 3372 index image
Predicted as 1 but is actually 0
Showing 3373 index image
Predicted as 1 but is actually 0
Showing 304 index image
Predicted as 0 but is actually 1
Showing 1329 index image
Predicted as 0 but is actually 1
Showing 3419 index image
Predicted as 1 but is actually 0
Showing 820 index image
Predicted as 0 but is actually 1
Showing 1845 index image
Predicted as 0 but is actually 1
Showing 3521 index image
Predicted as 1 but is actually 0
Showing 312 index image
Predicted as 0 but is actually 1
Showing 2873 index image
Predicted as 1 but is actually 0
Showing 3513 index image
Predicted as 1 but is actually 0
Showing 828 index image
Predicted as 0 but is actually 1
Showing 1340 index image
Predicted as 0 but is actually 1
Showing 1854 index image
Predicted as 0 but is actually 1
Showing 836 index image
Predicted as 0 but is actually 1
Showing 1861 index image
Predicted as 0 but is actually 1
Showing 3398 index image
Predicted as 1 but is actually 0
Showing 2375 index image
Predicted as 1 but is actually 0
Showing 2376 index image
Predicted as 1 but is actually 0
Showing 2377 index image
Predicted as 1 but is actually 0
Showing 3400 index image
Predicted as 1 but is actually 0
Showing 1355 index image
Predicted as 0 but is actually 1
Showing 1868 index image
Predicted as 0 but is actually 1
Showing 3404 index image
Predicted as 1 but is actually 0
Showing 2384 index image
Predicted as 1 but is actually 0
Showing 3410 index image
Predicted as 1 but is actually 0
Showing 852 index image
Predicted as 0 but is actually 1
Showing 854 index image
Predicted as 0 but is actually 1
Showing 2903 index image
Predicted as 1 but is actually 0
Showing 3415 index image
Predicted as 1 but is actually 0
Showing 2393 index image
Predicted as 1 but is actually 0
Showing 2394 index image
Predicted as 1 but is actually 0
Showing 347 index image
Predicted as 0 but is actually 1
Showing 1371 index image
Predicted as 0 but is actually 1
Showing 2905 index image
Predicted as 1 but is actually 0
Showing 2398 index image
Predicted as 1 but is actually 0
Showing 863 index image
Predicted as 0 but is actually 1
Showing 2400 index image
Predicted as 1 but is actually 0
Showing 2909 index image
Predicted as 1 but is actually 0
Showing 354 index image
Predicted as 0 but is actually 1
Showing 1379 index image
Predicted as 0 but is actually 1
Showing 2914 index image
Predicted as 1 but is actually 0
Showing 1382 index image
Predicted as 0 but is actually 1
Showing 2407 index image
Predicted as 1 but is actually 0
Showing 1384 index image
Predicted as 0 but is actually 1
Showing 876 index image
Predicted as 0 but is actually 1
Showing 877 index image
Predicted as 0 but is actually 1
Showing 1902 index image
Predicted as 0 but is actually 1
Showing 367 index image
Predicted as 0 but is actually 1
Showing 2926 index image
Predicted as 1 but is actually 0
Showing 1394 index image
Predicted as 0 but is actually 1
Showing 1397 index image
Predicted as 0 but is actually 1
Showing 375 index image
Predicted as 0 but is actually 1
Showing 376 index image
Predicted as 0 but is actually 1
Showing 2423 index image
Predicted as 1 but is actually 0
Showing 2426 index image
Predicted as 1 but is actually 0
Showing 2942 index image
Predicted as 1 but is actually 0
Showing 2432 index image
Predicted as 1 but is actually 0
Showing 2947 index image
Predicted as 1 but is actually 0
Showing 1412 index image
Predicted as 0 but is actually 1
Showing 390 index image
Predicted as 0 but is actually 1
Showing 2441 index image
Predicted as 1 but is actually 0
Showing 2442 index image
Predicted as 1 but is actually 0
Showing 910 index image
Predicted as 0 but is actually 1
Showing 1426 index image
Predicted as 0 but is actually 1
Showing 3474 index image
Predicted as 1 but is actually 0
Showing 2455 index image
Predicted as 1 but is actually 0
Showing 2968 index image
Predicted as 1 but is actually 0
Showing 921 index image
Predicted as 0 but is actually 1
Showing 1946 index image
Predicted as 0 but is actually 1
Showing 1438 index image
Predicted as 0 but is actually 1
Showing 1950 index image
Predicted as 0 but is actually 1
Showing 416 index image
Predicted as 0 but is actually 1
Showing 2976 index image
Predicted as 1 but is actually 0
Showing 1957 index image
Predicted as 0 but is actually 1
Showing 3493 index image
Predicted as 1 but is actually 0
Showing 935 index image
Predicted as 0 but is actually 1
Showing 3494 index image
Predicted as 1 but is actually 0
Showing 1963 index image
Predicted as 0 but is actually 1
Showing 1454 index image
Predicted as 0 but is actually 1
Showing 1967 index image
Predicted as 0 but is actually 1
Showing 434 index image
Predicted as 0 but is actually 1
Showing 2482 index image
Predicted as 1 but is actually 0
Showing 2483 index image
Predicted as 1 but is actually 0
Showing 2995 index image
Predicted as 1 but is actually 0
Showing 950 index image
Predicted as 0 but is actually 1
Showing 951 index image
Predicted as 0 but is actually 1
Showing 440 index image
Predicted as 0 but is actually 1
Showing 1465 index image
Predicted as 0 but is actually 1
Showing 954 index image
Predicted as 0 but is actually 1
Showing 1978 index image
Predicted as 0 but is actually 1
Showing 2486 index image
Predicted as 1 but is actually 0
Showing 1469 index image
Predicted as 0 but is actually 1
Showing 958 index image
Predicted as 0 but is actually 1
Showing 1981 index image
Predicted as 0 but is actually 1
Showing 1472 index image
Predicted as 0 but is actually 1
Showing 3004 index image
Predicted as 1 but is actually 0
Showing 962 index image
Predicted as 0 but is actually 1
Showing 1986 index image
Predicted as 0 but is actually 1
Showing 1988 index image
Predicted as 0 but is actually 1
Showing 2498 index image
Predicted as 1 but is actually 0
Showing 2499 index image
Predicted as 1 but is actually 0
Showing 455 index image
Predicted as 0 but is actually 1
Showing 456 index image
Predicted as 0 but is actually 1
Showing 1480 index image
Predicted as 0 but is actually 1
Showing 2500 index image
Predicted as 1 but is actually 0
Showing 2502 index image
Predicted as 1 but is actually 0
Showing 3017 index image
Predicted as 1 but is actually 0
Showing 463 index image
Predicted as 0 but is actually 1
Showing 3028 index image
Predicted as 1 but is actually 0
Showing 1493 index image
Predicted as 0 but is actually 1
Showing 2517 index image
Predicted as 1 but is actually 0
Showing 2519 index image
Predicted as 1 but is actually 0
Showing 3542 index image
Predicted as 1 but is actually 0
Showing 2521 index image
Predicted as 1 but is actually 0
Showing 986 index image
Predicted as 0 but is actually 1
Showing 475 index image
Predicted as 0 but is actually 1
Showing 3033 index image
Predicted as 1 but is actually 0
Showing 3552 index image
Predicted as 1 but is actually 0
Showing 3044 index image
Predicted as 1 but is actually 0
Showing 486 index image
Predicted as 0 but is actually 1
Showing 2025 index image
Predicted as 0 but is actually 1
Showing 1517 index image
Predicted as 0 but is actually 1
Showing 2029 index image
Predicted as 0 but is actually 1
Showing 495 index image
Predicted as 0 but is actually 1
Showing 3567 index image
Predicted as 1 but is actually 0
Showing 1010 index image
Predicted as 0 but is actually 1
Showing 3058 index image
Predicted as 1 but is actually 0
Showing 3059 index image
Predicted as 1 but is actually 0
Showing 1525 index image
Predicted as 0 but is actually 1
Showing 2549 index image
Predicted as 1 but is actually 0
Showing 3062 index image
Predicted as 1 but is actually 0
Showing 3574 index image
Predicted as 1 but is actually 0
Showing 1530 index image
Predicted as 0 but is actually 1
Showing 1022 index image
Predicted as 0 but is actually 1

Stacking 3 NNs?

In [30]:
modelGo1     = create_inception_v4() # This is used for final testing
modelGo2     = create_inception_v4() # This is used for final testing
modelGo3     = create_inception_v4() # This is used for final testing

modelGo1.load_weights("FlowerPower_InceptionV4best9305.hdf5")
modelGo1.compile(loss='categorical_crossentropy', 
                optimizer='adam', 
                metrics=['accuracy'])

modelGo2.load_weights("FlowerPower_InceptionV4best9307.hdf5")
modelGo2.compile(loss='categorical_crossentropy', 
                optimizer='adam', 
                metrics=['accuracy'])

modelGo3.load_weights("FlowerPower_InceptionV4best9418.hdf5")
modelGo3.compile(loss='categorical_crossentropy', 
                optimizer='adam', 
                metrics=['accuracy'])
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-30-1243c45e3d6c> in <module>
      3 modelGo3     = create_inception_v4() # This is used for final testing
      4 
----> 5 modelGo1.load_weights("FlowerPower_InceptionV4best9305.hdf5")
      6 modelGo1.compile(loss='categorical_crossentropy', 
      7                 optimizer='adam',

D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\engine\network.py in load_weights(self, filepath, by_name)
   1446         saving.load_weights_from_hdf5_group_by_name(f, self.layers)
   1447       else:
-> 1448         saving.load_weights_from_hdf5_group(f, self.layers)
   1449 
   1450   def _post_build_cleanup(self):

D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\engine\saving.py in load_weights_from_hdf5_group(f, layers)
    800                        str(len(weight_values)) + ' elements.')
    801     weight_value_tuples += zip(symbolic_weights, weight_values)
--> 802   K.batch_set_value(weight_value_tuples)
    803 
    804 

D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\keras\backend.py in batch_set_value(tuples)
   2712           assign_placeholder = array_ops.placeholder(tf_dtype,
   2713                                                      shape=value.shape)
-> 2714           assign_op = x.assign(assign_placeholder)
   2715           x._assign_placeholder = assign_placeholder
   2716           x._assign_op = assign_op

D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\ops\resource_variable_ops.py in assign(self, value, use_locking, name, read_value)
    961     with _handle_graph(self.handle):
    962       value_tensor = ops.convert_to_tensor(value, dtype=self.dtype)
--> 963       self._shape.assert_is_compatible_with(value_tensor.shape)
    964       assign_op = gen_resource_variable_ops.assign_variable_op(
    965           self.handle, value_tensor, name=name)

D:\DocumentsDDrive\Installed_Files\Anaconda3\envs\tf-gpu\lib\site-packages\tensorflow\python\framework\tensor_shape.py in assert_is_compatible_with(self, other)
    845     """
    846     if not self.is_compatible_with(other):
--> 847       raise ValueError("Shapes %s and %s are incompatible" % (self, other))
    848 
    849   def most_specific_compatible_shape(self, other):

ValueError: Shapes (1, 1, 192, 96) and (192, 192, 1, 1) are incompatible
In [ ]:
predicts1    = modelGo1.predict(tsDat)
predicts2    = modelGo2.predict(tsDat)
predicts3    = modelGo3.predict(tsDat)
In [ ]:
predicts_ave = []

for i in range(len(predicts1)):
    ave0 = (predicts1[i][0] + predicts2[i][0] + predicts3[i][0])/3
    ave1 = (predicts1[i][1] + predicts2[i][1] + predicts3[i][1])/3
    predicts_ave.append([ave0, ave1])
In [ ]:
predicts_ave = np.array(predicts_ave)
In [ ]:
# Prepare the classification output
# for the classification report
predout     = np.argmax(predicts_ave,axis=1)
testout     = np.argmax(tsLbl,axis=1)
labelname   = ['non-flower', 'flower']
                                            # the labels for the classfication report


testScores  = metrics.accuracy_score(testout,predout)
confusion   = metrics.confusion_matrix(testout,predout)


print("Best accuracy (on testing dataset): %.2f%%" % (testScores*100))
print(metrics.classification_report(testout,predout,target_names=labelname,digits=4))
print(confusion)
In [ ]: